Compare commits

..

1 commit

Author: William Fu-Hinthorn
Commit: 5f632f8c11
Date: 2024-06-26 12:49:05 -07:00

[Core] Expand typing for RunnableLike
Include callables whose second argument is a RunnableConfig
2467 changed files with 54902 additions and 124851 deletions
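For context, the change described above concerns callables like the one sketched below: a function whose second positional argument is a RunnableConfig also falls under the RunnableLike type and can be coerced into a Runnable. This is a rough illustration only, assuming the standard langchain_core coercion path; the function name and config keys are made up, not taken from the commit.

from langchain_core.runnables import RunnableConfig, RunnableLambda

def add_prefix(text: str, config: RunnableConfig) -> str:
    # The second positional argument receives the RunnableConfig, so the
    # callable can read tags, metadata, callbacks, and so on.
    prefix = config.get("metadata", {}).get("prefix", ">> ")
    return prefix + text

# Coerce the two-argument callable into a Runnable and invoke it.
runnable = RunnableLambda(add_prefix)
print(runnable.invoke("hello", config={"metadata": {"prefix": "* "}}))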

View File

@@ -5,10 +5,10 @@ services:
dockerfile: libs/langchain/dev.Dockerfile
context: ..
volumes:
# Update this to wherever you want VS Code to mount the folder of your project
# Update this to wherever you want VS Code to mount the folder of your project
- ..:/workspaces/langchain:cached
networks:
- langchain-network
- langchain-network
# environment:
# MONGO_ROOT_USERNAME: root
# MONGO_ROOT_PASSWORD: example123
@@ -28,3 +28,5 @@ services:
networks:
langchain-network:
driver: bridge

View File

@@ -4,6 +4,9 @@ contact_links:
- name: 🤔 Question or Problem
about: Ask a question or ask about a problem in GitHub Discussions.
url: https://www.github.com/langchain-ai/langchain/discussions/categories/q-a
- name: Discord
url: https://discord.gg/6adMQxSpJS
about: General community discussions
- name: Feature Request
url: https://www.github.com/langchain-ai/langchain/discussions/categories/ideas
about: Suggest a feature or an idea

View File

@@ -350,7 +350,11 @@ def get_graphql_pr_edges(*, settings: Settings, after: Union[str, None] = None):
print("Querying PRs...")
else:
print(f"Querying PRs with cursor {after}...")
data = get_graphql_response(settings=settings, query=prs_query, after=after)
data = get_graphql_response(
settings=settings,
query=prs_query,
after=after
)
graphql_response = PRsResponse.model_validate(data)
return graphql_response.data.repository.pullRequests.edges
@@ -480,16 +484,10 @@ def get_contributors(settings: Settings):
lines_changed = pr.additions + pr.deletions
score = _logistic(files_changed, 20) + _logistic(lines_changed, 100)
contributor_scores[pr.author.login] += score
three_months_ago = datetime.now(timezone.utc) - timedelta(days=3 * 30)
three_months_ago = (datetime.now(timezone.utc) - timedelta(days=3*30))
if pr.createdAt > three_months_ago:
recent_contributor_scores[pr.author.login] += score
return (
contributors,
contributor_scores,
recent_contributor_scores,
reviewers,
authors,
)
return contributors, contributor_scores, recent_contributor_scores, reviewers, authors
def get_top_users(
@@ -526,13 +524,9 @@ if __name__ == "__main__":
# question_commentors, question_last_month_commentors, question_authors = get_experts(
# settings=settings
# )
(
contributors,
contributor_scores,
recent_contributor_scores,
reviewers,
pr_authors,
) = get_contributors(settings=settings)
contributors, contributor_scores, recent_contributor_scores, reviewers, pr_authors = get_contributors(
settings=settings
)
# authors = {**question_authors, **pr_authors}
authors = {**pr_authors}
maintainers_logins = {
@@ -553,7 +547,6 @@ if __name__ == "__main__":
"obi1kenobi",
"langchain-infra",
"jacoblee93",
"isahers1",
"dqbd",
"bracesproul",
"akira",
@@ -565,7 +558,7 @@ if __name__ == "__main__":
maintainers.append(
{
"login": login,
"count": contributors[login], # + question_commentors[login],
"count": contributors[login], #+ question_commentors[login],
"avatarUrl": user.avatarUrl,
"twitterUsername": user.twitterUsername,
"url": user.url,
@@ -621,7 +614,9 @@ if __name__ == "__main__":
new_people_content = yaml.dump(
people, sort_keys=False, width=200, allow_unicode=True
)
if people_old_content == new_people_content:
if (
people_old_content == new_people_content
):
logging.info("The LangChain People data hasn't changed, finishing.")
sys.exit(0)
people_path.write_text(new_people_content, encoding="utf-8")
@@ -634,7 +629,9 @@ if __name__ == "__main__":
logging.info(f"Creating a new branch {branch_name}")
subprocess.run(["git", "checkout", "-B", branch_name], check=True)
logging.info("Adding updated file")
subprocess.run(["git", "add", str(people_path)], check=True)
subprocess.run(
["git", "add", str(people_path)], check=True
)
logging.info("Committing updated file")
message = "👥 Update LangChain people data"
result = subprocess.run(["git", "commit", "-m", message], check=True)
@@ -643,4 +640,4 @@ if __name__ == "__main__":
logging.info("Creating PR")
pr = repo.create_pull(title=message, body=message, base="master", head=branch_name)
logging.info(f"Created PR: {pr.number}")
logging.info("Finished")
logging.info("Finished")

View File

@@ -1,12 +1,11 @@
import glob
import json
import os
import sys
import os
from typing import Dict, List, Set
import tomllib
from collections import defaultdict
from typing import Dict, List, Set
from pathlib import Path
import glob
LANGCHAIN_DIRS = [
"libs/core",
@@ -16,58 +15,22 @@ LANGCHAIN_DIRS = [
"libs/experimental",
]
def all_package_dirs() -> Set[str]:
return {
"/".join(path.split("/")[:-1]).lstrip("./")
for path in glob.glob("./libs/**/pyproject.toml", recursive=True)
if "libs/cli" not in path and "libs/standard-tests" not in path
}
return {"/".join(path.split("/")[:-1]) for path in glob.glob("./libs/**/pyproject.toml", recursive=True)}
def dependents_graph() -> dict:
"""
Construct a mapping of package -> dependents, such that we can
run tests on all dependents of a package when a change is made.
"""
dependents = defaultdict(set)
for path in glob.glob("./libs/**/pyproject.toml", recursive=True):
if "template" in path:
continue
# load regular and test deps from pyproject.toml
with open(path, "rb") as f:
pyproject = tomllib.load(f)["tool"]["poetry"]
pyproject = tomllib.load(f)['tool']['poetry']
pkg_dir = "libs" + "/".join(path.split("libs")[1].split("/")[:-1])
for dep in [
*pyproject["dependencies"].keys(),
*pyproject["group"]["test"]["dependencies"].keys(),
]:
for dep in pyproject['dependencies']:
if "langchain" in dep:
dependents[dep].add(pkg_dir)
continue
# load extended deps from extended_testing_deps.txt
package_path = Path(path).parent
extended_requirement_path = package_path / "extended_testing_deps.txt"
if extended_requirement_path.exists():
with open(extended_requirement_path, "r") as f:
extended_deps = f.read().splitlines()
for depline in extended_deps:
if depline.startswith("-e "):
# editable dependency
assert depline.startswith(
"-e ../partners/"
), "Extended test deps should only editable install partner packages"
partner = depline.split("partners/")[1]
dep = f"langchain-{partner}"
else:
dep = depline.split("==")[0]
if "langchain" in dep:
dependents[dep].add(pkg_dir)
return dependents
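As a rough illustration of the mapping that dependents_graph() builds (the directory names here are hypothetical, not taken from this diff): each langchain-* dependency maps to the set of package directories whose tests should run when that dependency changes.

dependents = {
    "langchain-core": {"libs/langchain", "libs/community", "libs/partners/openai"},
    "langchain": {"libs/experimental", "libs/community"},
}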
@@ -84,58 +47,6 @@ def add_dependents(dirs_to_eval: Set[str], dependents: dict) -> List[str]:
return list(updated)
def _get_configs_for_single_dir(job: str, dir_: str) -> List[Dict[str, str]]:
if dir_ == "libs/core":
return [
{"working-directory": dir_, "python-version": f"3.{v}"}
for v in range(8, 13)
]
min_python = "3.8"
max_python = "3.12"
# custom logic for specific directories
if dir_ == "libs/partners/milvus":
# milvus poetry doesn't allow 3.12 because they
# declare deps in funny way
max_python = "3.11"
if dir_ in ["libs/community", "libs/langchain"] and job == "extended-tests":
# community extended test resolution in 3.12 is slow
# even in uv
max_python = "3.11"
if dir_ == "libs/community" and job == "compile-integration-tests":
# community integration deps are slow in 3.12
max_python = "3.11"
return [
{"working-directory": dir_, "python-version": min_python},
{"working-directory": dir_, "python-version": max_python},
]
def _get_configs_for_multi_dirs(
job: str, dirs_to_run: List[str], dependents: dict
) -> List[Dict[str, str]]:
if job == "lint":
dirs = add_dependents(
dirs_to_run["lint"] | dirs_to_run["test"] | dirs_to_run["extended-test"],
dependents,
)
elif job in ["test", "compile-integration-tests", "dependencies"]:
dirs = add_dependents(
dirs_to_run["test"] | dirs_to_run["extended-test"], dependents
)
elif job == "extended-tests":
dirs = list(dirs_to_run["extended-test"])
else:
raise ValueError(f"Unknown job: {job}")
return [
config for dir_ in dirs for config in _get_configs_for_single_dir(job, dir_)
]
if __name__ == "__main__":
files = sys.argv[1:]
@@ -209,23 +120,14 @@ if __name__ == "__main__":
dependents = dependents_graph()
# we now have dirs_by_job
# todo: clean this up
map_job_to_configs = {
job: _get_configs_for_multi_dirs(job, dirs_to_run, dependents)
for job in [
"lint",
"test",
"extended-tests",
"compile-integration-tests",
"dependencies",
]
outputs = {
"dirs-to-lint": add_dependents(
dirs_to_run["lint"] | dirs_to_run["test"] | dirs_to_run["extended-test"], dependents
),
"dirs-to-test": add_dependents(dirs_to_run["test"] | dirs_to_run["extended-test"], dependents),
"dirs-to-extended-test": list(dirs_to_run["extended-test"]),
"docs-edited": "true" if docs_edited else "",
}
map_job_to_configs["test-doc-imports"] = (
[{"python-version": "3.12"}] if docs_edited else []
)
for key, value in map_job_to_configs.items():
for key, value in outputs.items():
json_output = json.dumps(value)
print(f"{key}={json_output}")

View File

@@ -1,35 +0,0 @@
import sys
import tomllib
if __name__ == "__main__":
# Get the TOML file path from the command line argument
toml_file = sys.argv[1]
# read toml file
with open(toml_file, "rb") as file:
toml_data = tomllib.load(file)
# see if we're releasing an rc
version = toml_data["tool"]["poetry"]["version"]
releasing_rc = "rc" in version
# if not, iterate through dependencies and make sure none allow prereleases
if not releasing_rc:
dependencies = toml_data["tool"]["poetry"]["dependencies"]
for lib in dependencies:
dep_version = dependencies[lib]
dep_version_string = (
dep_version["version"] if isinstance(dep_version, dict) else dep_version
)
if "rc" in dep_version_string:
raise ValueError(
f"Dependency {lib} has a prerelease version. Please remove this."
)
if isinstance(dep_version, dict) and dep_version.get(
"allow-prereleases", False
):
raise ValueError(
f"Dependency {lib} has allow-prereleases set to true. Please remove this."
)

View File

@@ -1,11 +1,6 @@
import sys
if sys.version_info >= (3, 11):
import tomllib
else:
# for python 3.10 and below, which doesnt have stdlib tomllib
import tomli as tomllib
import tomllib
from packaging.version import parse as parse_version
import re
@@ -14,11 +9,8 @@ MIN_VERSION_LIBS = [
"langchain-community",
"langchain",
"langchain-text-splitters",
"SQLAlchemy",
]
SKIP_IF_PULL_REQUEST = ["langchain-core"]
def get_min_version(version: str) -> str:
# base regex for x.x.x with cases for rc/post/etc
@@ -45,7 +37,7 @@ def get_min_version(version: str) -> str:
raise ValueError(f"Unrecognized version format: {version}")
def get_min_version_from_toml(toml_path: str, versions_for: str):
def get_min_version_from_toml(toml_path: str):
# Parse the TOML file
with open(toml_path, "rb") as file:
toml_data = tomllib.load(file)
@@ -58,10 +50,6 @@ def get_min_version_from_toml(toml_path: str, versions_for: str):
# Iterate over the libs in MIN_VERSION_LIBS
for lib in MIN_VERSION_LIBS:
if versions_for == "pull_request" and lib in SKIP_IF_PULL_REQUEST:
# some libs only get checked on release because of simultaneous
# changes
continue
# Check if the lib is present in the dependencies
if lib in dependencies:
# Get the version string
@@ -82,10 +70,10 @@ def get_min_version_from_toml(toml_path: str, versions_for: str):
if __name__ == "__main__":
# Get the TOML file path from the command line argument
toml_file = sys.argv[1]
versions_for = sys.argv[2]
assert versions_for in ["release", "pull_request"]
# Call the function to get the minimum versions
min_versions = get_min_version_from_toml(toml_file, versions_for)
min_versions = get_min_version_from_toml(toml_file)
print(" ".join([f"{lib}=={version}" for lib, version in min_versions.items()]))
print(
" ".join([f"{lib}=={version}" for lib, version in min_versions.items()])
)

View File

@@ -7,10 +7,6 @@ on:
required: true
type: string
description: "From which folder this pipeline executes"
python-version:
required: true
type: string
description: "Python version to use"
env:
POETRY_VERSION: "1.7.1"
@@ -21,14 +17,22 @@ jobs:
run:
working-directory: ${{ inputs.working-directory }}
runs-on: ubuntu-latest
name: "poetry run pytest -m compile tests/integration_tests #${{ inputs.python-version }}"
strategy:
matrix:
python-version:
- "3.8"
- "3.9"
- "3.10"
- "3.11"
- "3.12"
name: "poetry run pytest -m compile tests/integration_tests #${{ matrix.python-version }}"
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ inputs.python-version }}
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ inputs.working-directory }}
cache-key: compile-integration

View File

@@ -11,10 +11,6 @@ on:
required: false
type: string
description: "Relative path to the langchain library folder"
python-version:
required: true
type: string
description: "Python version to use"
env:
POETRY_VERSION: "1.7.1"
@@ -25,14 +21,22 @@ jobs:
run:
working-directory: ${{ inputs.working-directory }}
runs-on: ubuntu-latest
name: dependency checks ${{ inputs.python-version }}
strategy:
matrix:
python-version:
- "3.8"
- "3.9"
- "3.10"
- "3.11"
- "3.12"
name: dependency checks ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ inputs.python-version }}
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ inputs.working-directory }}
cache-key: pydantic-cross-compat

View File

@@ -6,10 +6,6 @@ on:
working-directory:
required: true
type: string
python-version:
required: true
type: string
description: "Python version to use"
env:
POETRY_VERSION: "1.7.1"
@@ -20,14 +16,19 @@ jobs:
run:
working-directory: ${{ inputs.working-directory }}
runs-on: ubuntu-latest
name: Python ${{ inputs.python-version }}
strategy:
matrix:
python-version:
- "3.8"
- "3.11"
name: Python ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ inputs.python-version }}
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ inputs.working-directory }}
cache-key: core

View File

@@ -11,10 +11,6 @@ on:
required: false
type: string
description: "Relative path to the langchain library folder"
python-version:
required: true
type: string
description: "Python version to use"
env:
POETRY_VERSION: "1.7.1"
@@ -25,15 +21,27 @@ env:
jobs:
build:
name: "make lint #${{ inputs.python-version }}"
name: "make lint #${{ matrix.python-version }}"
runs-on: ubuntu-latest
strategy:
matrix:
# Only lint on the min and max supported Python versions.
# It's extremely unlikely that there's a lint issue on any version in between
# that doesn't show up on the min or max versions.
#
# GitHub rate-limits how many jobs can be running at any one time.
# Starting new jobs is also relatively slow,
# so linting on fewer versions makes CI faster.
python-version:
- "3.8"
- "3.12"
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ inputs.python-version }}
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ inputs.working-directory }}
cache-key: lint-with-extras
@@ -78,7 +86,7 @@ jobs:
with:
path: |
${{ env.WORKDIR }}/.mypy_cache
key: mypy-lint-${{ runner.os }}-${{ runner.arch }}-py${{ inputs.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', inputs.working-directory)) }}
key: mypy-lint-${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', inputs.working-directory)) }}
- name: Analysing the code with our lint
@@ -112,7 +120,7 @@ jobs:
with:
path: |
${{ env.WORKDIR }}/.mypy_cache_test
key: mypy-test-${{ runner.os }}-${{ runner.arch }}-py${{ inputs.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', inputs.working-directory)) }}
key: mypy-test-${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', inputs.working-directory)) }}
- name: Analysing the code with our lint
working-directory: ${{ inputs.working-directory }}

View File

@@ -122,6 +122,7 @@ jobs:
fi
{
echo 'release-body<<EOF'
echo "# Release $TAG"
echo $PREAMBLE
echo
git log --format="%s" "$PREV_TAG"..HEAD -- $WORKING_DIR
@@ -134,7 +135,6 @@ jobs:
- release-notes
uses:
./.github/workflows/_test_release.yml
permissions: write-all
with:
working-directory: ${{ inputs.working-directory }}
dangerous-nonmaster-release: ${{ inputs.dangerous-nonmaster-release }}
@@ -189,7 +189,7 @@ jobs:
--extra-index-url https://test.pypi.org/simple/ \
"$PKG_NAME==$VERSION" || \
( \
sleep 15 && \
sleep 5 && \
poetry run pip install \
--extra-index-url https://test.pypi.org/simple/ \
"$PKG_NAME==$VERSION" \
@@ -221,17 +221,12 @@ jobs:
run: make tests
working-directory: ${{ inputs.working-directory }}
- name: Check for prerelease versions
working-directory: ${{ inputs.working-directory }}
run: |
poetry run python $GITHUB_WORKSPACE/.github/scripts/check_prerelease_dependencies.py pyproject.toml
- name: Get minimum versions
working-directory: ${{ inputs.working-directory }}
id: min-version
run: |
poetry run pip install packaging
min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml release)"
min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml)"
echo "min-versions=$min_versions" >> "$GITHUB_OUTPUT"
echo "min-versions=$min_versions"
@@ -290,7 +285,6 @@ jobs:
VOYAGE_API_KEY: ${{ secrets.VOYAGE_API_KEY }}
UPSTAGE_API_KEY: ${{ secrets.UPSTAGE_API_KEY }}
FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
UNSTRUCTURED_API_KEY: ${{ secrets.UNSTRUCTURED_API_KEY }}
run: make integration_tests
working-directory: ${{ inputs.working-directory }}

View File

@@ -11,10 +11,6 @@ on:
required: false
type: string
description: "Relative path to the langchain library folder"
python-version:
required: true
type: string
description: "Python version to use"
env:
POETRY_VERSION: "1.7.1"
@@ -25,14 +21,22 @@ jobs:
run:
working-directory: ${{ inputs.working-directory }}
runs-on: ubuntu-latest
name: "make test #${{ inputs.python-version }}"
strategy:
matrix:
python-version:
- "3.8"
- "3.9"
- "3.10"
- "3.11"
- "3.12"
name: "make test #${{ matrix.python-version }}"
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ inputs.python-version }}
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ inputs.working-directory }}
cache-key: core
@@ -65,22 +69,3 @@ jobs:
# grep will exit non-zero if the target message isn't found,
# and `set -e` above will cause the step to fail.
echo "$STATUS" | grep 'nothing to commit, working tree clean'
- name: Get minimum versions
working-directory: ${{ inputs.working-directory }}
id: min-version
run: |
poetry run pip install packaging tomli
min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml pull_request)"
echo "min-versions=$min_versions" >> "$GITHUB_OUTPUT"
echo "min-versions=$min_versions"
# Temporarily disabled until we can get the minimum versions working
# - name: Run unit tests with minimum dependency versions
# if: ${{ steps.min-version.outputs.min-versions != '' }}
# env:
# MIN_VERSIONS: ${{ steps.min-version.outputs.min-versions }}
# run: |
# poetry run pip install --force-reinstall $MIN_VERSIONS --editable .
# make tests
# working-directory: ${{ inputs.working-directory }}

View File

@@ -2,11 +2,6 @@ name: test_doc_imports
on:
workflow_call:
inputs:
python-version:
required: true
type: string
description: "Python version to use"
env:
POETRY_VERSION: "1.7.1"
@@ -14,14 +9,18 @@ env:
jobs:
build:
runs-on: ubuntu-latest
name: "check doc imports #${{ inputs.python-version }}"
strategy:
matrix:
python-version:
- "3.12"
name: "check doc imports #${{ matrix.python-version }}"
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ inputs.python-version }}
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
cache-key: core

View File

@@ -33,96 +33,91 @@ jobs:
run: |
python .github/scripts/check_diff.py ${{ steps.files.outputs.all }} >> $GITHUB_OUTPUT
outputs:
lint: ${{ steps.set-matrix.outputs.lint }}
test: ${{ steps.set-matrix.outputs.test }}
extended-tests: ${{ steps.set-matrix.outputs.extended-tests }}
compile-integration-tests: ${{ steps.set-matrix.outputs.compile-integration-tests }}
dependencies: ${{ steps.set-matrix.outputs.dependencies }}
test-doc-imports: ${{ steps.set-matrix.outputs.test-doc-imports }}
dirs-to-lint: ${{ steps.set-matrix.outputs.dirs-to-lint }}
dirs-to-test: ${{ steps.set-matrix.outputs.dirs-to-test }}
dirs-to-extended-test: ${{ steps.set-matrix.outputs.dirs-to-extended-test }}
docs-edited: ${{ steps.set-matrix.outputs.docs-edited }}
lint:
name: cd ${{ matrix.job-configs.working-directory }}
name: cd ${{ matrix.working-directory }}
needs: [ build ]
if: ${{ needs.build.outputs.lint != '[]' }}
if: ${{ needs.build.outputs.dirs-to-lint != '[]' }}
strategy:
matrix:
job-configs: ${{ fromJson(needs.build.outputs.lint) }}
working-directory: ${{ fromJson(needs.build.outputs.dirs-to-lint) }}
uses: ./.github/workflows/_lint.yml
with:
working-directory: ${{ matrix.job-configs.working-directory }}
python-version: ${{ matrix.job-configs.python-version }}
working-directory: ${{ matrix.working-directory }}
secrets: inherit
test:
name: cd ${{ matrix.job-configs.working-directory }}
name: cd ${{ matrix.working-directory }}
needs: [ build ]
if: ${{ needs.build.outputs.test != '[]' }}
if: ${{ needs.build.outputs.dirs-to-test != '[]' }}
strategy:
matrix:
job-configs: ${{ fromJson(needs.build.outputs.test) }}
working-directory: ${{ fromJson(needs.build.outputs.dirs-to-test) }}
uses: ./.github/workflows/_test.yml
with:
working-directory: ${{ matrix.job-configs.working-directory }}
python-version: ${{ matrix.job-configs.python-version }}
working-directory: ${{ matrix.working-directory }}
secrets: inherit
test-doc-imports:
needs: [ build ]
if: ${{ needs.build.outputs.test-doc-imports != '[]' }}
strategy:
matrix:
job-configs: ${{ fromJson(needs.build.outputs.test-doc-imports) }}
if: ${{ needs.build.outputs.dirs-to-test != '[]' || needs.build.outputs.docs-edited }}
uses: ./.github/workflows/_test_doc_imports.yml
secrets: inherit
with:
python-version: ${{ matrix.job-configs.python-version }}
compile-integration-tests:
name: cd ${{ matrix.job-configs.working-directory }}
name: cd ${{ matrix.working-directory }}
needs: [ build ]
if: ${{ needs.build.outputs.compile-integration-tests != '[]' }}
if: ${{ needs.build.outputs.dirs-to-test != '[]' }}
strategy:
matrix:
job-configs: ${{ fromJson(needs.build.outputs.compile-integration-tests) }}
working-directory: ${{ fromJson(needs.build.outputs.dirs-to-test) }}
uses: ./.github/workflows/_compile_integration_test.yml
with:
working-directory: ${{ matrix.job-configs.working-directory }}
python-version: ${{ matrix.job-configs.python-version }}
working-directory: ${{ matrix.working-directory }}
secrets: inherit
dependencies:
name: cd ${{ matrix.job-configs.working-directory }}
name: cd ${{ matrix.working-directory }}
needs: [ build ]
if: ${{ needs.build.outputs.dependencies != '[]' }}
if: ${{ needs.build.outputs.dirs-to-test != '[]' }}
strategy:
matrix:
job-configs: ${{ fromJson(needs.build.outputs.dependencies) }}
working-directory: ${{ fromJson(needs.build.outputs.dirs-to-test) }}
uses: ./.github/workflows/_dependencies.yml
with:
working-directory: ${{ matrix.job-configs.working-directory }}
python-version: ${{ matrix.job-configs.python-version }}
working-directory: ${{ matrix.working-directory }}
secrets: inherit
extended-tests:
name: "cd ${{ matrix.job-configs.working-directory }} / make extended_tests #${{ matrix.job-configs.python-version }}"
name: "cd ${{ matrix.working-directory }} / make extended_tests #${{ matrix.python-version }}"
needs: [ build ]
if: ${{ needs.build.outputs.extended-tests != '[]' }}
if: ${{ needs.build.outputs.dirs-to-extended-test != '[]' }}
strategy:
matrix:
# note different variable for extended test dirs
job-configs: ${{ fromJson(needs.build.outputs.extended-tests) }}
working-directory: ${{ fromJson(needs.build.outputs.dirs-to-extended-test) }}
python-version:
- "3.8"
- "3.9"
- "3.10"
- "3.11"
- "3.12"
runs-on: ubuntu-latest
defaults:
run:
working-directory: ${{ matrix.job-configs.working-directory }}
working-directory: ${{ matrix.working-directory }}
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.job-configs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ matrix.job-configs.python-version }}
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ matrix.job-configs.working-directory }}
working-directory: ${{ matrix.working-directory }}
cache-key: extended
- name: Install dependencies

View File

@@ -26,11 +26,6 @@ jobs:
python-version: '3.10'
- id: files
uses: Ana06/get-changed-files@v2.2.0
with:
filter: |
*.ipynb
*.md
*.mdx
- name: Check new docs
run: |
python docs/scripts/check_templates.py ${{ steps.files.outputs.added }}

View File

@@ -16,7 +16,6 @@ jobs:
langchain-people:
if: github.repository_owner == 'langchain-ai'
runs-on: ubuntu-latest
permissions: write-all
steps:
- name: Dump GitHub context
env:

View File

@@ -27,6 +27,7 @@ jobs:
- "libs/partners/groq"
- "libs/partners/mistralai"
- "libs/partners/together"
- "libs/partners/cohere"
- "libs/partners/google-vertexai"
- "libs/partners/google-genai"
- "libs/partners/aws"
@@ -39,6 +40,10 @@ jobs:
with:
repository: langchain-ai/langchain-google
path: langchain-google
- uses: actions/checkout@v4
with:
repository: langchain-ai/langchain-cohere
path: langchain-cohere
- uses: actions/checkout@v4
with:
repository: langchain-ai/langchain-aws
@@ -48,9 +53,11 @@ jobs:
run: |
rm -rf \
langchain/libs/partners/google-genai \
langchain/libs/partners/google-vertexai
langchain/libs/partners/google-vertexai \
langchain/libs/partners/cohere
mv langchain-google/libs/genai langchain/libs/partners/google-genai
mv langchain-google/libs/vertexai langchain/libs/partners/google-vertexai
mv langchain-cohere/libs/cohere langchain/libs/partners/cohere
mv langchain-aws/libs/aws langchain/libs/partners/aws
- name: Set up Python ${{ matrix.python-version }}
@@ -109,6 +116,7 @@ jobs:
rm -rf \
langchain/libs/partners/google-genai \
langchain/libs/partners/google-vertexai \
langchain/libs/partners/cohere \
langchain/libs/partners/aws
- name: Ensure the tests did not create any additional files

.gitignore (vendored): 2 lines changed
View File

@@ -172,8 +172,6 @@ docs/api_reference/*/
!docs/api_reference/_static/
!docs/api_reference/templates/
!docs/api_reference/themes/
!docs/api_reference/_extensions/
!docs/api_reference/scripts/
docs/docs/build
docs/docs/node_modules
docs/docs/yarn.lock

View File

@@ -52,7 +52,7 @@ Now:
`from langchain_experimental.sql import SQLDatabaseChain`
Alternatively, if you are just interested in using the query generation part of the SQL chain, you can check out this [`SQL question-answering tutorial`](https://python.langchain.com/v0.2/docs/tutorials/sql_qa/#convert-question-to-sql-query)
Alternatively, if you are just interested in using the query generation part of the SQL chain, you can check out [`create_sql_query_chain`](https://github.com/langchain-ai/langchain/blob/master/docs/extras/use_cases/tabular/sql_query.ipynb)
`from langchain.chains import create_sql_query_chain`
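A short usage sketch of create_sql_query_chain, which builds a chain that only generates the SQL query without executing it; the database URI and model below are illustrative assumptions.

from langchain.chains import create_sql_query_chain
from langchain_community.utilities import SQLDatabase
from langchain_openai import ChatOpenAI

# Generate a SQL query from a natural-language question (no execution).
db = SQLDatabase.from_uri("sqlite:///Chinook.db")  # hypothetical local database
chain = create_sql_query_chain(ChatOpenAI(model="gpt-3.5-turbo"), db)
print(chain.invoke({"question": "How many employees are there?"}))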

View File

@@ -31,7 +31,6 @@ docs_linkcheck:
api_docs_build:
poetry run python docs/api_reference/create_api_rst.py
cd docs/api_reference && poetry run make html
poetry run python docs/api_reference/scripts/custom_formatter.py docs/api_reference/_build/html/
API_PKG ?= text-splitters
@@ -39,14 +38,12 @@ api_docs_quick_preview:
poetry run pip install "pydantic<2"
poetry run python docs/api_reference/create_api_rst.py $(API_PKG)
cd docs/api_reference && poetry run make html
poetry run python docs/api_reference/scripts/custom_formatter.py docs/api_reference/_build/html/
open docs/api_reference/_build/html/reference.html
open docs/api_reference/_build/html/$(shell echo $(API_PKG) | sed 's/-/_/g')_api_reference.html
## api_docs_clean: Clean the API Reference documentation build artifacts.
api_docs_clean:
find ./docs/api_reference -name '*_api_reference.rst' -delete
git clean -fdX ./docs/api_reference
rm docs/api_reference/index.md
## api_docs_linkcheck: Run linkchecker on the API Reference documentation.

View File

@@ -7,9 +7,11 @@
[![PyPI - License](https://img.shields.io/pypi/l/langchain-core?style=flat-square)](https://opensource.org/licenses/MIT)
[![PyPI - Downloads](https://img.shields.io/pypi/dm/langchain-core?style=flat-square)](https://pypistats.org/packages/langchain-core)
[![GitHub star chart](https://img.shields.io/github/stars/langchain-ai/langchain?style=flat-square)](https://star-history.com/#langchain-ai/langchain)
[![Dependency Status](https://img.shields.io/librariesio/github/langchain-ai/langchain?style=flat-square)](https://libraries.io/github/langchain-ai/langchain)
[![Open Issues](https://img.shields.io/github/issues-raw/langchain-ai/langchain?style=flat-square)](https://github.com/langchain-ai/langchain/issues)
[![Open in Dev Containers](https://img.shields.io/static/v1?label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode&style=flat-square)](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchain)
[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/langchain-ai/langchain)
[![](https://dcbadge.vercel.app/api/server/6adMQxSpJS?compact=true&style=flat)](https://discord.gg/6adMQxSpJS)
[![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI)](https://twitter.com/langchainai)
Looking for the JS/TS library? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).
@@ -36,25 +38,24 @@ conda install langchain -c conda-forge
For these applications, LangChain simplifies the entire application lifecycle:
- **Open-source libraries**: Build your applications using LangChain's open-source [building blocks](https://python.langchain.com/v0.2/docs/concepts#langchain-expression-language-lcel), [components](https://python.langchain.com/v0.2/docs/concepts), and [third-party integrations](https://python.langchain.com/v0.2/docs/integrations/platforms/).
Use [LangGraph](/docs/concepts/#langgraph) to build stateful agents with first-class streaming and human-in-the-loop support.
- **Open-source libraries**: Build your applications using LangChain's [modular building blocks](https://python.langchain.com/v0.2/docs/concepts/#langchain-expression-language-lcel) and [components](https://python.langchain.com/v0.2/docs/concepts/#components). Integrate with hundreds of [third-party providers](https://python.langchain.com/v0.2/docs/integrations/platforms/).
- **Productionization**: Inspect, monitor, and evaluate your apps with [LangSmith](https://docs.smith.langchain.com/) so that you can constantly optimize and deploy with confidence.
- **Deployment**: Turn your LangGraph applications into production-ready APIs and Assistants with [LangGraph Cloud](https://langchain-ai.github.io/langgraph/cloud/).
- **Deployment**: Turn any chain into a REST API with [LangServe](https://python.langchain.com/v0.2/docs/langserve/).
### Open-source libraries
- **`langchain-core`**: Base abstractions and LangChain Expression Language.
- **`langchain-community`**: Third party integrations.
- Some integrations have been further split into **partner packages** that only rely on **`langchain-core`**. Examples include **`langchain_openai`** and **`langchain_anthropic`**.
- **`langchain`**: Chains, agents, and retrieval strategies that make up an application's cognitive architecture.
- **[`LangGraph`](https://langchain-ai.github.io/langgraph/)**: A library for building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph. Integrates smoothly with LangChain, but can be used without it.
- **[`LangGraph`](https://langchain-ai.github.io/langgraph/)**: A library for building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
### Productionization:
- **[LangSmith](https://docs.smith.langchain.com/)**: A developer platform that lets you debug, test, evaluate, and monitor chains built on any LLM framework and seamlessly integrates with LangChain.
### Deployment:
- **[LangGraph Cloud](https://langchain-ai.github.io/langgraph/cloud/)**: Turn your LangGraph applications into production-ready APIs and Assistants.
- **[LangServe](https://python.langchain.com/v0.2/docs/langserve/)**: A library for deploying LangChain chains as REST APIs.
![Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers.](docs/static/svg/langchain_stack_062024.svg "LangChain Architecture Overview")
![Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers.](docs/static/svg/langchain_stack.svg "LangChain Architecture Overview")
## 🧱 What can you build with LangChain?
@@ -105,7 +106,7 @@ Retrieval Augmented Generation involves [loading data](https://python.langchain.
**🤖 Agents**
Agents allow an LLM autonomy over how a task is accomplished. Agents make decisions about which Actions to take, then take that Action, observe the result, and repeat until the task is complete. LangChain provides a [standard interface for agents](https://python.langchain.com/v0.2/docs/concepts/#agents), along with [LangGraph](https://github.com/langchain-ai/langgraph) for building custom agents.
Agents allow an LLM autonomy over how a task is accomplished. Agents make decisions about which Actions to take, then take that Action, observe the result, and repeat until the task is complete. LangChain provides a [standard interface for agents](https://python.langchain.com/v0.2/docs/concepts/#agents) along with the [LangGraph](https://github.com/langchain-ai/langgraph) extension for building custom agents.
## 📖 Documentation
@@ -119,9 +120,10 @@ Please see [here](https://python.langchain.com) for full documentation, which in
## 🌐 Ecosystem
- [🦜🛠️ LangSmith](https://docs.smith.langchain.com/): Trace and evaluate your language model applications and intelligent agents to help you move from prototype to production.
- [🦜🕸️ LangGraph](https://langchain-ai.github.io/langgraph/): Create stateful, multi-actor applications with LLMs. Integrates smoothly with LangChain, but can be used without it.
- [🦜🏓 LangServe](https://python.langchain.com/docs/langserve): Deploy LangChain runnables and chains as REST APIs.
- [🦜🛠️ LangSmith](https://docs.smith.langchain.com/): Tracing and evaluating your language model applications and intelligent agents to help you move from prototype to production.
- [🦜🕸️ LangGraph](https://langchain-ai.github.io/langgraph/): Creating stateful, multi-actor applications with LLMs, built on top of (and intended to be used with) LangChain primitives.
- [🦜🏓 LangServe](https://python.langchain.com/docs/langserve): Deploying LangChain runnables and chains as REST APIs.
- [LangChain Templates](https://python.langchain.com/v0.2/docs/templates/): Example applications hosted with LangServe.
## 💁 Contributing

View File

@@ -64,7 +64,7 @@
"metadata": {},
"outputs": [],
"source": [
"! pip install -U langchain openai langchain-chroma langchain-experimental # (newest versions required for multi-modal)"
"! pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)"
]
},
{
@@ -355,7 +355,7 @@
"\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.storage import InMemoryStore\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_core.documents import Document\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",

View File

@@ -37,7 +37,7 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install -U --quiet langchain langchain-chroma langchain-community openai langchain-experimental\n",
"%pip install -U --quiet langchain langchain_community openai chromadb langchain-experimental\n",
"%pip install --quiet \"unstructured[all-docs]\" pypdf pillow pydantic lxml pillow matplotlib chromadb tiktoken"
]
},
@@ -344,8 +344,8 @@
"\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.storage import InMemoryStore\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.embeddings import VertexAIEmbeddings\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_core.documents import Document\n",
"\n",
"\n",

View File

@@ -7,7 +7,7 @@
"metadata": {},
"outputs": [],
"source": [
"pip install -U langchain umap-learn scikit-learn langchain_community tiktoken langchain-openai langchainhub langchain-chroma langchain-anthropic"
"pip install -U langchain umap-learn scikit-learn langchain_community tiktoken langchain-openai langchainhub chromadb langchain-anthropic"
]
},
{
@@ -645,7 +645,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores import Chroma\n",
"\n",
"# Initialize all_texts with leaf_texts\n",
"all_texts = leaf_texts.copy()\n",

View File

@@ -36,7 +36,6 @@ Notebook | Description
[llm_symbolic_math.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/llm_symbolic_math.ipynb) | Solve algebraic equations with the help of llms (language learning models) and sympy, a python library for symbolic mathematics.
[meta_prompt.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/meta_prompt.ipynb) | Implement the meta-prompt concept, which is a method for building self-improving agents that reflect on their own performance and modify their instructions accordingly.
[multi_modal_output_agent.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/multi_modal_output_agent.ipynb) | Generate multi-modal outputs, specifically images and text.
[multi_modal_RAG_vdms.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/multi_modal_RAG_vdms.ipynb) | Perform retrieval-augmented generation (rag) on documents including text and images, using unstructured for parsing, Intel's Visual Data Management System (VDMS) as the vectorstore, and chains.
[multi_player_dnd.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/multi_player_dnd.ipynb) | Simulate multi-player dungeons & dragons games, with a custom function determining the speaking schedule of the agents.
[multiagent_authoritarian.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/multiagent_authoritarian.ipynb) | Implement a multi-agent simulation where a privileged agent controls the conversation, including deciding who speaks and when the conversation ends, in the context of a simulated news network.
[multiagent_bidding.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/multiagent_bidding.ipynb) | Implement a multi-agent simulation where agents bid to speak, with the highest bidder speaking next, demonstrated through a fictitious presidential debate example.
@@ -58,6 +57,4 @@ Notebook | Description
[two_agent_debate_tools.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/two_agent_debate_tools.ipynb) | Simulate multi-agent dialogues where the agents can utilize various tools.
[two_player_dnd.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/two_player_dnd.ipynb) | Simulate a two-player dungeons & dragons game, where a dialogue simulator class is used to coordinate the dialogue between the protagonist and the dungeon master.
[wikibase_agent.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/wikibase_agent.ipynb) | Create a simple wikibase agent that utilizes sparql generation, with testing done on http://wikidata.org.
[oracleai_demo.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/oracleai_demo.ipynb) | This guide outlines how to utilize Oracle AI Vector Search alongside Langchain for an end-to-end RAG pipeline, providing step-by-step examples. The process includes loading documents from various sources using OracleDocLoader, summarizing them either within or outside the database with OracleSummary, and generating embeddings similarly through OracleEmbeddings. It also covers chunking documents according to specific requirements using Advanced Oracle Capabilities from OracleTextSplitter, and finally, storing and indexing these documents in a Vector Store for querying with OracleVS.
[rag-locally-on-intel-cpu.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/rag-locally-on-intel-cpu.ipynb) | Perform Retrieval-Augmented-Generation (RAG) on locally downloaded open-source models using langchain and open source tools and execute it on Intel Xeon CPU. We showed an example of how to apply RAG on Llama 2 model and enable it to answer the queries related to Intel Q1 2024 earnings release.
[visual_RAG_vdms.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/visual_RAG_vdms.ipynb) | Performs Visual Retrieval-Augmented-Generation (RAG) using videos and scene descriptions generated by open source models.
[oracleai_demo.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/oracleai_demo.ipynb) | This guide outlines how to utilize Oracle AI Vector Search alongside Langchain for an end-to-end RAG pipeline, providing step-by-step examples. The process includes loading documents from various sources using OracleDocLoader, summarizing them either within or outside the database with OracleSummary, and generating embeddings similarly through OracleEmbeddings. It also covers chunking documents according to specific requirements using Advanced Oracle Capabilities from OracleTextSplitter, and finally, storing and indexing these documents in a Vector Store for querying with OracleVS.

View File

@@ -39,7 +39,7 @@
"metadata": {},
"outputs": [],
"source": [
"! pip install langchain langchain-chroma \"unstructured[all-docs]\" pydantic lxml langchainhub"
"! pip install langchain unstructured[all-docs] pydantic lxml langchainhub"
]
},
{
@@ -320,7 +320,7 @@
"\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.storage import InMemoryStore\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_core.documents import Document\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",

View File

@@ -59,7 +59,7 @@
"metadata": {},
"outputs": [],
"source": [
"! pip install langchain langchain-chroma \"unstructured[all-docs]\" pydantic lxml"
"! pip install langchain unstructured[all-docs] pydantic lxml"
]
},
{
@@ -375,7 +375,7 @@
"\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.storage import InMemoryStore\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_core.documents import Document\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",

View File

@@ -59,7 +59,7 @@
"metadata": {},
"outputs": [],
"source": [
"! pip install langchain langchain-chroma \"unstructured[all-docs]\" pydantic lxml"
"! pip install langchain unstructured[all-docs] pydantic lxml"
]
},
{
@@ -378,8 +378,8 @@
"\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.storage import InMemoryStore\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.embeddings import GPT4AllEmbeddings\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_core.documents import Document\n",
"\n",
"# The vectorstore to use to index the child chunks\n",

View File

@@ -19,7 +19,7 @@
"metadata": {},
"outputs": [],
"source": [
"! pip install -U langchain openai langchain_chroma langchain-experimental # (newest versions required for multi-modal)"
"! pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)"
]
},
{
@@ -132,7 +132,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"baseline = Chroma.from_texts(\n",

View File

@@ -28,7 +28,7 @@
"outputs": [],
"source": [
"from langchain.chains import RetrievalQA\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_openai import OpenAI, OpenAIEmbeddings\n",
"from langchain_text_splitters import CharacterTextSplitter\n",
"\n",

View File

@@ -14,7 +14,7 @@
}
],
"source": [
"%pip install -qU langchain-airbyte langchain_chroma"
"%pip install -qU langchain-airbyte"
]
},
{
@@ -123,7 +123,7 @@
"outputs": [],
"source": [
"import tiktoken\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"enc = tiktoken.get_encoding(\"cl100k_base\")\n",

View File

@@ -166,7 +166,7 @@
"source": [
"### SQL Database Agent example\n",
"\n",
"This example demonstrates the use of the [SQL Database Agent](/docs/integrations/tools/sql_database) for answering questions over a Databricks database."
"This example demonstrates the use of the [SQL Database Agent](/docs/integrations/toolkits/sql_database.html) for answering questions over a Databricks database."
]
},
{

View File

@@ -39,7 +39,7 @@
"metadata": {},
"outputs": [],
"source": [
"! pip install langchain docugami==0.0.8 dgml-utils==0.3.0 pydantic langchainhub langchain-chroma hnswlib --upgrade --quiet"
"! pip install langchain docugami==0.0.8 dgml-utils==0.3.0 pydantic langchainhub chromadb hnswlib --upgrade --quiet"
]
},
{
@@ -547,7 +547,7 @@
"\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.storage import InMemoryStore\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores.chroma import Chroma\n",
"from langchain_core.documents import Document\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",

View File

@@ -84,7 +84,7 @@
}
],
"source": [
"%pip install --quiet pypdf langchain-chroma tiktoken openai \n",
"%pip install --quiet pypdf chromadb tiktoken openai \n",
"%pip uninstall -y langchain-fireworks\n",
"%pip install --editable /mnt/disks/data/langchain/libs/partners/fireworks"
]
@@ -138,7 +138,7 @@
"all_splits = text_splitter.split_documents(data)\n",
"\n",
"# Add to vectorDB\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_fireworks.embeddings import FireworksEmbeddings\n",
"\n",
"vectorstore = Chroma.from_documents(\n",

View File

@@ -170,7 +170,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_text_splitters import CharacterTextSplitter\n",
"\n",
"with open(\"../../state_of_the_union.txt\") as f:\n",

File diff suppressed because one or more lines are too long

View File

@@ -7,7 +7,7 @@
"metadata": {},
"outputs": [],
"source": [
"! pip install langchain-chroma langchain_community tiktoken langchain-openai langchainhub langchain langgraph"
"! pip install langchain_community tiktoken langchain-openai langchainhub chromadb langchain langgraph"
]
},
{
@@ -30,8 +30,8 @@
"outputs": [],
"source": [
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.document_loaders import WebBaseLoader\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"urls = [\n",

View File

@@ -7,7 +7,7 @@
"metadata": {},
"outputs": [],
"source": [
"! pip install langchain-chroma langchain_community tiktoken langchain-openai langchainhub langchain langgraph tavily-python"
"! pip install langchain_community tiktoken langchain-openai langchainhub chromadb langchain langgraph tavily-python"
]
},
{
@@ -77,8 +77,8 @@
"outputs": [],
"source": [
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.document_loaders import WebBaseLoader\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"urls = [\n",
@@ -180,8 +180,8 @@
"from langchain.output_parsers.openai_tools import PydanticToolsParser\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.schema import Document\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.tools.tavily_search import TavilySearchResults\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_core.messages import BaseMessage, FunctionMessage\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",

View File

@@ -7,7 +7,7 @@
"metadata": {},
"outputs": [],
"source": [
"! pip install langchain-chroma langchain_community tiktoken langchain-openai langchainhub langchain langgraph"
"! pip install langchain_community tiktoken langchain-openai langchainhub chromadb langchain langgraph"
]
},
{
@@ -86,8 +86,8 @@
"outputs": [],
"source": [
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.document_loaders import WebBaseLoader\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"urls = [\n",
@@ -188,7 +188,7 @@
"from langchain.output_parsers import PydanticOutputParser\n",
"from langchain.output_parsers.openai_tools import PydanticToolsParser\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_core.messages import BaseMessage, FunctionMessage\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",

View File

@@ -58,7 +58,7 @@
"metadata": {},
"outputs": [],
"source": [
"! pip install -U langchain openai langchain-chroma langchain-experimental # (newest versions required for multi-modal)"
"! pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)"
]
},
{
@@ -187,7 +187,7 @@
"\n",
"import chromadb\n",
"import numpy as np\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_experimental.open_clip import OpenCLIPEmbeddings\n",
"from PIL import Image as _PILImage\n",
"\n",

View File

@@ -18,7 +18,26 @@
"* Use of multimodal embeddings (such as [CLIP](https://openai.com/research/clip)) to embed images and text\n",
"* Use of [VDMS](https://github.com/IntelLabs/vdms/blob/master/README.md) as a vector store with support for multi-modal\n",
"* Retrieval of both images and text using similarity search\n",
"* Passing raw images and text chunks to a multimodal LLM for answer synthesis "
"* Passing raw images and text chunks to a multimodal LLM for answer synthesis \n",
"\n",
"\n",
"## Packages\n",
"\n",
"For `unstructured`, you will also need `poppler` ([installation instructions](https://pdf2image.readthedocs.io/en/latest/installation.html)) and `tesseract` ([installation instructions](https://tesseract-ocr.github.io/tessdoc/Installation.html)) in your system."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "febbc459-ebba-4c1a-a52b-fed7731593f8",
"metadata": {},
"outputs": [],
"source": [
"# (newest versions required for multi-modal)\n",
"! pip install --quiet -U vdms langchain-experimental\n",
"\n",
"# lock to 0.10.19 due to a persistent bug in more recent versions\n",
"! pip install --quiet pdf2image \"unstructured[all-docs]==0.10.19\" pillow pydantic lxml open_clip_torch"
]
},
{
@@ -34,7 +53,7 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 3,
"id": "5f483872",
"metadata": {},
"outputs": [
@@ -42,7 +61,8 @@
"name": "stdout",
"output_type": "stream",
"text": [
"a1b9206b08ef626e15b356bf9e031171f7c7eb8f956a2733f196f0109246fe2b\n"
"docker: Error response from daemon: Conflict. The container name \"/vdms_rag_nb\" is already in use by container \"0c19ed281463ac10d7efe07eb815643e3e534ddf24844357039453ad2b0c27e8\". You have to remove (or rename) that container to be able to reuse that name.\n",
"See 'docker run --help'.\n"
]
}
],
@@ -55,32 +75,9 @@
"vdms_client = VDMS_Client(port=55559)"
]
},
{
"cell_type": "markdown",
"id": "2498a0a1",
"metadata": {},
"source": [
"## Packages\n",
"\n",
"For `unstructured`, you will also need `poppler` ([installation instructions](https://pdf2image.readthedocs.io/en/latest/installation.html)) and `tesseract` ([installation instructions](https://tesseract-ocr.github.io/tessdoc/Installation.html)) in your system."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "febbc459-ebba-4c1a-a52b-fed7731593f8",
"metadata": {},
"outputs": [],
"source": [
"! pip install --quiet -U vdms langchain-experimental\n",
"\n",
"# lock to 0.10.19 due to a persistent bug in more recent versions\n",
"! pip install --quiet pdf2image \"unstructured[all-docs]==0.10.19\" pillow pydantic lxml open_clip_torch"
]
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": null,
"id": "78ac6543",
"metadata": {},
"outputs": [],
@@ -98,9 +95,14 @@
"\n",
"### Partition PDF text and images\n",
" \n",
"Let's use famous photographs from the PDF version of Library of Congress Magazine in this example.\n",
"Let's look at an example pdf containing interesting images.\n",
"\n",
"We can use `partition_pdf` from [Unstructured](https://unstructured-io.github.io/unstructured/introduction.html#key-concepts) to extract text and images."
"Famous photographs from library of congress:\n",
"\n",
"* https://www.loc.gov/lcm/pdf/LCM_2020_1112.pdf\n",
"* We'll use this as an example below\n",
"\n",
"We can use `partition_pdf` below from [Unstructured](https://unstructured-io.github.io/unstructured/introduction.html#key-concepts) to extract text and images."
]
},
{
@@ -114,8 +116,8 @@
"\n",
"import requests\n",
"\n",
"# Folder to store pdf and extracted images\n",
"datapath = Path(\"./data/multimodal_files\").resolve()\n",
"# Folder with pdf and extracted images\n",
"datapath = Path(\"./multimodal_files\").resolve()\n",
"datapath.mkdir(parents=True, exist_ok=True)\n",
"\n",
"pdf_url = \"https://www.loc.gov/lcm/pdf/LCM_2020_1112.pdf\"\n",
@@ -172,8 +174,14 @@
"source": [
"## Multi-modal embeddings with our document\n",
"\n",
"In this section, we initialize the VDMS vector store for both text and images. For better performance, we use model `ViT-g-14` from [OpenClip multimodal embeddings](https://python.langchain.com/docs/integrations/text_embedding/open_clip).\n",
"The images are stored as base64 encoded strings with `vectorstore.add_images`.\n"
"We will use [OpenClip multimodal embeddings](https://python.langchain.com/docs/integrations/text_embedding/open_clip).\n",
"\n",
"We use a larger model for better performance (set in `langchain_experimental.open_clip.py`).\n",
"\n",
"```\n",
"model_name = \"ViT-g-14\"\n",
"checkpoint = \"laion2b_s34b_b88k\"\n",
"```"
]
},
{
@@ -192,7 +200,9 @@
"vectorstore = VDMS(\n",
" client=vdms_client,\n",
" collection_name=\"mm_rag_clip_photos\",\n",
" embedding=OpenCLIPEmbeddings(model_name=\"ViT-g-14\", checkpoint=\"laion2b_s34b_b88k\"),\n",
" embedding_function=OpenCLIPEmbeddings(\n",
" model_name=\"ViT-g-14\", checkpoint=\"laion2b_s34b_b88k\"\n",
" ),\n",
")\n",
"\n",
"# Get image URIs with .jpg extension only\n",
@@ -223,7 +233,7 @@
"source": [
"## RAG\n",
"\n",
"Here we define helper functions for image results."
"`vectorstore.add_images` will store / retrieve images as base64 encoded strings."
]
},
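{
"cell_type": "markdown",
"id": "add-images-sketch-md",
"metadata": {},
"source": [
"A minimal sketch of that round trip, assuming `image_uris` has been collected above; the `uris=` keyword follows the Chroma-style image API."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "add-images-sketch-code",
"metadata": {},
"outputs": [],
"source": [
"# Sketch: add images by file path, then confirm a retrieved hit comes back as a base64 string.\n",
"vectorstore.add_images(uris=image_uris)\n",
"hits = vectorstore.similarity_search(\"photograph of a woman with children\", k=1)\n",
"print(hits[0].page_content[:80])  # base64-encoded image data"
]
},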
{
@@ -382,8 +392,7 @@
"id": "1566096d-97c2-4ddc-ba4a-6ef88c525e4e",
"metadata": {},
"source": [
"## Test retrieval and run RAG\n",
"Now let's query for a `woman with children` and retrieve the top results."
"## Test retrieval and run RAG"
]
},
{
@@ -443,14 +452,6 @@
" print(doc.page_content)"
]
},
{
"cell_type": "markdown",
"id": "15e9b54d",
"metadata": {},
"source": [
"Now let's use the `multi_modal_rag_chain` to process the same query and display the response."
]
},
{
"cell_type": "code",
"execution_count": 11,
@@ -461,10 +462,10 @@
"name": "stdout",
"output_type": "stream",
"text": [
" The image depicts a woman with several children. The woman appears to be of Cherokee heritage, as suggested by the text provided. The image is described as having been initially regretted by the subject, Florence Owens Thompson, due to her feeling that it did not accurately represent her leadership qualities.\n",
"The historical and cultural context of the image is tied to the Great Depression and the Dust Bowl, both of which affected the Cherokee people in Oklahoma. The photograph was taken during this period, and its subject, Florence Owens Thompson, was a leader within her community who worked tirelessly to help those affected by these crises.\n",
"The image's symbolism and meaning can be interpreted as a representation of resilience and strength in the face of adversity. The woman is depicted with multiple children, which could signify her role as a caregiver and protector during difficult times.\n",
"Connections between the image and the related text include Florence Owens Thompson's leadership qualities and her regretted feelings about the photograph. Additionally, the mention of Dorothea Lange, the photographer who took this photo, ties the image to its historical context and the broader narrative of the Great Depression and Dust Bowl in Oklahoma. \n"
"1. Detailed description of the visual elements in the image: The image features a woman with children, likely a mother and her family, standing together outside. They appear to be poor or struggling financially, as indicated by their attire and surroundings.\n",
"2. Historical and cultural context of the image: The photo was taken in 1936 during the Great Depression, when many families struggled to make ends meet. Dorothea Lange, a renowned American photographer, took this iconic photograph that became an emblem of poverty and hardship experienced by many Americans at that time.\n",
"3. Interpretation of the image's symbolism and meaning: The image conveys a sense of unity and resilience despite adversity. The woman and her children are standing together, displaying their strength as a family unit in the face of economic challenges. The photograph also serves as a reminder of the importance of empathy and support for those who are struggling.\n",
"4. Connections between the image and the related text: The text provided offers additional context about the woman in the photo, her background, and her feelings towards the photograph. It highlights the historical backdrop of the Great Depression and emphasizes the significance of this particular image as a representation of that time period.\n"
]
}
],
@@ -491,6 +492,14 @@
"source": [
"! docker kill vdms_rag_nb"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8ba652da",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
@@ -509,7 +518,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
"version": "3.10.13"
}
},
"nbformat": 4,

View File

@@ -58,7 +58,7 @@
"metadata": {},
"outputs": [],
"source": [
"! pip install -U langchain-nomic langchain-chroma langchain-community tiktoken langchain-openai langchain"
"! pip install -U langchain-nomic langchain_community tiktoken langchain-openai chromadb langchain"
]
},
{
@@ -167,7 +167,7 @@
"source": [
"import os\n",
"\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
"from langchain_nomic import NomicEmbeddings\n",

View File

@@ -56,7 +56,7 @@
},
"outputs": [],
"source": [
"! pip install -U langchain-nomic langchain-chroma langchain-community tiktoken langchain-openai langchain # (newest versions required for multi-modal)"
"! pip install -U langchain-nomic langchain_community tiktoken langchain-openai chromadb langchain # (newest versions required for multi-modal)"
]
},
{
@@ -194,7 +194,7 @@
"\n",
"import chromadb\n",
"import numpy as np\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_nomic import NomicEmbeddings\n",
"from PIL import Image as _PILImage\n",
"\n",

View File

@@ -20,8 +20,8 @@
"outputs": [],
"source": [
"from langchain.chains import RetrievalQA\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.document_loaders import TextLoader\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_openai import OpenAIEmbeddings\n",
"from langchain_text_splitters import CharacterTextSplitter"
]

View File

@@ -80,7 +80,7 @@
"outputs": [],
"source": [
"from langchain.schema import Document\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"embeddings = OpenAIEmbeddings()"

View File

@@ -1,756 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "10f50955-be55-422f-8c62-3a32f8cf02ed",
"metadata": {},
"source": [
"# RAG application running locally on Intel Xeon CPU using langchain and open-source models"
]
},
{
"cell_type": "markdown",
"id": "48113be6-44bb-4aac-aed3-76a1365b9561",
"metadata": {},
"source": [
"Author - Pratool Bharti (pratool.bharti@intel.com)"
]
},
{
"cell_type": "markdown",
"id": "8b10b54b-1572-4ea1-9c1e-1d29fcc3dcd9",
"metadata": {},
"source": [
"In this cookbook, we use langchain tools and open source models to execute locally on CPU. This notebook has been validated to run on Intel Xeon 8480+ CPU. Here we implement a RAG pipeline for Llama2 model to answer questions about Intel Q1 2024 earnings release."
]
},
{
"cell_type": "markdown",
"id": "acadbcec-3468-4926-8ce5-03b678041c0a",
"metadata": {},
"source": [
"**Create a conda or virtualenv environment with python >=3.10 and install following libraries**\n",
"<br>\n",
"\n",
"`pip install --upgrade langchain langchain-community langchainhub langchain-chroma bs4 gpt4all pypdf pysqlite3-binary` <br>\n",
"`pip install llama-cpp-python --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cpu`"
]
},
{
"cell_type": "markdown",
"id": "84c392c8-700a-42ec-8e94-806597f22e43",
"metadata": {},
"source": [
"**Load pysqlite3 in sys modules since ChromaDB requires sqlite3.**"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "145cd491-b388-4ea7-bdc8-2f4995cac6fd",
"metadata": {},
"outputs": [],
"source": [
"__import__(\"pysqlite3\")\n",
"import sys\n",
"\n",
"sys.modules[\"sqlite3\"] = sys.modules.pop(\"pysqlite3\")"
]
},
{
"cell_type": "markdown",
"id": "14dde7e2-b236-49b9-b3a0-08c06410418c",
"metadata": {},
"source": [
"**Import essential components from langchain to load and split data**"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "887643ba-249e-48d6-9aa7-d25087e8dfbf",
"metadata": {},
"outputs": [],
"source": [
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"from langchain_community.document_loaders import PyPDFLoader"
]
},
{
"cell_type": "markdown",
"id": "922c0eba-8736-4de5-bd2f-3d0f00b16e43",
"metadata": {},
"source": [
"**Download Intel Q1 2024 earnings release**"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "2d6a2419-5338-4188-8615-a40a65ff8019",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"--2024-07-15 15:04:43-- https://d1io3yog0oux5.cloudfront.net/_11d435a500963f99155ee058df09f574/intel/db/887/9014/earnings_release/Q1+24_EarningsRelease_FINAL.pdf\n",
"Resolving proxy-dmz.intel.com (proxy-dmz.intel.com)... 10.7.211.16\n",
"Connecting to proxy-dmz.intel.com (proxy-dmz.intel.com)|10.7.211.16|:912... connected.\n",
"Proxy request sent, awaiting response... 200 OK\n",
"Length: 133510 (130K) [application/pdf]\n",
"Saving to: intel_q1_2024_earnings.pdf\n",
"\n",
"intel_q1_2024_earni 100%[===================>] 130.38K --.-KB/s in 0.005s \n",
"\n",
"2024-07-15 15:04:44 (24.6 MB/s) - intel_q1_2024_earnings.pdf saved [133510/133510]\n",
"\n"
]
}
],
"source": [
"!wget 'https://d1io3yog0oux5.cloudfront.net/_11d435a500963f99155ee058df09f574/intel/db/887/9014/earnings_release/Q1+24_EarningsRelease_FINAL.pdf' -O intel_q1_2024_earnings.pdf"
]
},
{
"cell_type": "markdown",
"id": "e3612627-e105-453d-8a50-bbd6e39dedb5",
"metadata": {},
"source": [
"**Loading earning release pdf document through PyPDFLoader**"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "cac6278e-ebad-4224-a062-bf6daca24cb0",
"metadata": {},
"outputs": [],
"source": [
"loader = PyPDFLoader(\"intel_q1_2024_earnings.pdf\")\n",
"data = loader.load()"
]
},
{
"cell_type": "markdown",
"id": "a7dca43b-1c62-41df-90c7-6ed2904f823d",
"metadata": {},
"source": [
"**Splitting entire document in several chunks with each chunk size is 500 tokens**"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "4486adbe-0d0e-4685-8c08-c1774ed6e993",
"metadata": {},
"outputs": [],
"source": [
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)\n",
"all_splits = text_splitter.split_documents(data)"
]
},
{
"cell_type": "markdown",
"id": "af142346-e793-4a52-9a56-63e3be416b3d",
"metadata": {},
"source": [
"**Looking at the first split of the document**"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "e4240fd1-898e-4bfc-a377-02c9bc25b56e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Document(metadata={'source': 'intel_q1_2024_earnings.pdf', 'page': 0}, page_content='Intel Corporation\\n2200 Mission College Blvd.\\nSanta Clara, CA 95054-1549\\n \\nNews Release\\n Intel Reports First -Quarter 2024 Financial Results\\nNEWS SUMMARY\\n▪First-quarter revenue of $12.7 billion , up 9% year over year (YoY).\\n▪First-quarter GAAP earnings (loss) per share (EPS) attributable to Intel was $(0.09) ; non-GAAP EPS \\nattributable to Intel was $0.18 .')"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"all_splits[0]"
]
},
{
"cell_type": "markdown",
"id": "b88d2632-7c1b-49ef-a691-c0eb67d23e6a",
"metadata": {},
"source": [
"**One of the major step in RAG is to convert each split of document into embeddings and store in a vector database such that searching relevant documents are efficient.** <br>\n",
"**For that, importing Chroma vector database from langchain. Also, importing open source GPT4All for embedding models**"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "9ff99dd7-9d47-4239-ba0a-d775792334ba",
"metadata": {},
"outputs": [],
"source": [
"from langchain_chroma import Chroma\n",
"from langchain_community.embeddings import GPT4AllEmbeddings"
]
},
{
"cell_type": "markdown",
"id": "b5d1f4dd-dd8d-4a20-95d1-2dbdd204375a",
"metadata": {},
"source": [
"**In next step, we will download one of the most popular embedding model \"all-MiniLM-L6-v2\". Find more details of the model at this link https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2**"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "05db3494-5d8e-4a13-9941-26330a86f5e5",
"metadata": {},
"outputs": [],
"source": [
"model_name = \"all-MiniLM-L6-v2.gguf2.f16.gguf\"\n",
"gpt4all_kwargs = {\"allow_download\": \"True\"}\n",
"embeddings = GPT4AllEmbeddings(model_name=model_name, gpt4all_kwargs=gpt4all_kwargs)"
]
},
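{
"cell_type": "markdown",
"id": "embedding-sanity-check-md",
"metadata": {},
"source": [
"**Optional sanity check (not required for the pipeline): embed a sample query and inspect the vector length, which should be 384 for \"all-MiniLM-L6-v2\".**"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "embedding-sanity-check-code",
"metadata": {},
"outputs": [],
"source": [
"# Embed a sample query to verify the model loaded and check the embedding size.\n",
"sample_vector = embeddings.embed_query(\"Intel Q1 2024 revenue\")\n",
"print(len(sample_vector))  # expected 384 for all-MiniLM-L6-v2"
]
},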
{
"cell_type": "markdown",
"id": "4e53999e-1983-46ac-8039-2783e194c3ae",
"metadata": {},
"source": [
"**Store all the embeddings in the Chroma database**"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "0922951a-9ddf-4761-973d-8e9a86f61284",
"metadata": {},
"outputs": [],
"source": [
"vectorstore = Chroma.from_documents(documents=all_splits, embedding=embeddings)"
]
},
{
"cell_type": "markdown",
"id": "29f94fa0-6c75-4a65-a1a3-debc75422479",
"metadata": {},
"source": [
"**Now, let's find relevant splits from the documents related to the question**"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "88c8152d-ec7a-4f0b-9d86-877789407537",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"4\n"
]
}
],
"source": [
"question = \"What is Intel CCG revenue in Q1 2024\"\n",
"docs = vectorstore.similarity_search(question)\n",
"print(len(docs))"
]
},
{
"cell_type": "markdown",
"id": "53330c6b-cb0f-43f9-b379-2e57ac1e5335",
"metadata": {},
"source": [
"**Look at the first retrieved document from the vector database**"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "43a6d94f-b5c4-47b0-a353-2db4c3d24d9c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Document(metadata={'page': 1, 'source': 'intel_q1_2024_earnings.pdf'}, page_content='Client Computing Group (CCG) $7.5 billion up31%\\nData Center and AI (DCAI) $3.0 billion up5%\\nNetwork and Edge (NEX) $1.4 billion down 8%\\nTotal Intel Products revenue $11.9 billion up17%\\nIntel Foundry $4.4 billion down 10%\\nAll other:\\nAltera $342 million down 58%\\nMobileye $239 million down 48%\\nOther $194 million up17%\\nTotal all other revenue $775 million down 46%\\nIntersegment eliminations $(4.4) billion\\nTotal net revenue $12.7 billion up9%\\nIntel Products Highlights')"
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"docs[0]"
]
},
{
"cell_type": "markdown",
"id": "64ba074f-4b36-442e-b7e2-b26d6e2815c3",
"metadata": {},
"source": [
"**Download Lllama-2 model from Huggingface and store locally** <br>\n",
"**You can download different quantization variant of Lllama-2 model from the link below. We are using Q8 version here (7.16GB).** <br>\n",
"https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c8dd0811-6f43-4bc6-b854-2ab377639c9a",
"metadata": {},
"outputs": [],
"source": [
"!huggingface-cli download TheBloke/Llama-2-7b-Chat-GGUF llama-2-7b-chat.Q8_0.gguf --local-dir . --local-dir-use-symlinks False"
]
},
{
"cell_type": "markdown",
"id": "3895b1f5-f51d-4539-abf0-af33d7ca48ea",
"metadata": {},
"source": [
"**Import langchain components required to load downloaded LLMs model**"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "fb087088-aa62-44c0-8356-061e9b9f1186",
"metadata": {},
"outputs": [],
"source": [
"from langchain.callbacks.manager import CallbackManager\n",
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
"from langchain_community.llms import LlamaCpp"
]
},
{
"cell_type": "markdown",
"id": "5a8a111e-2614-4b70-b034-85cd3e7304cb",
"metadata": {},
"source": [
"**Loading the local Lllama-2 model using Llama-cpp library**"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "fb917da2-c0d7-4995-b56d-26254276e0da",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"llama_model_loader: loaded meta data with 19 key-value pairs and 291 tensors from llama-2-7b-chat.Q8_0.gguf (version GGUF V2)\n",
"llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n",
"llama_model_loader: - kv 0: general.architecture str = llama\n",
"llama_model_loader: - kv 1: general.name str = LLaMA v2\n",
"llama_model_loader: - kv 2: llama.context_length u32 = 4096\n",
"llama_model_loader: - kv 3: llama.embedding_length u32 = 4096\n",
"llama_model_loader: - kv 4: llama.block_count u32 = 32\n",
"llama_model_loader: - kv 5: llama.feed_forward_length u32 = 11008\n",
"llama_model_loader: - kv 6: llama.rope.dimension_count u32 = 128\n",
"llama_model_loader: - kv 7: llama.attention.head_count u32 = 32\n",
"llama_model_loader: - kv 8: llama.attention.head_count_kv u32 = 32\n",
"llama_model_loader: - kv 9: llama.attention.layer_norm_rms_epsilon f32 = 0.000001\n",
"llama_model_loader: - kv 10: general.file_type u32 = 7\n",
"llama_model_loader: - kv 11: tokenizer.ggml.model str = llama\n",
"llama_model_loader: - kv 12: tokenizer.ggml.tokens arr[str,32000] = [\"<unk>\", \"<s>\", \"</s>\", \"<0x00>\", \"<...\n",
"llama_model_loader: - kv 13: tokenizer.ggml.scores arr[f32,32000] = [0.000000, 0.000000, 0.000000, 0.0000...\n",
"llama_model_loader: - kv 14: tokenizer.ggml.token_type arr[i32,32000] = [2, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, ...\n",
"llama_model_loader: - kv 15: tokenizer.ggml.bos_token_id u32 = 1\n",
"llama_model_loader: - kv 16: tokenizer.ggml.eos_token_id u32 = 2\n",
"llama_model_loader: - kv 17: tokenizer.ggml.unknown_token_id u32 = 0\n",
"llama_model_loader: - kv 18: general.quantization_version u32 = 2\n",
"llama_model_loader: - type f32: 65 tensors\n",
"llama_model_loader: - type q8_0: 226 tensors\n",
"llm_load_vocab: special tokens cache size = 259\n",
"llm_load_vocab: token to piece cache size = 0.1684 MB\n",
"llm_load_print_meta: format = GGUF V2\n",
"llm_load_print_meta: arch = llama\n",
"llm_load_print_meta: vocab type = SPM\n",
"llm_load_print_meta: n_vocab = 32000\n",
"llm_load_print_meta: n_merges = 0\n",
"llm_load_print_meta: vocab_only = 0\n",
"llm_load_print_meta: n_ctx_train = 4096\n",
"llm_load_print_meta: n_embd = 4096\n",
"llm_load_print_meta: n_layer = 32\n",
"llm_load_print_meta: n_head = 32\n",
"llm_load_print_meta: n_head_kv = 32\n",
"llm_load_print_meta: n_rot = 128\n",
"llm_load_print_meta: n_swa = 0\n",
"llm_load_print_meta: n_embd_head_k = 128\n",
"llm_load_print_meta: n_embd_head_v = 128\n",
"llm_load_print_meta: n_gqa = 1\n",
"llm_load_print_meta: n_embd_k_gqa = 4096\n",
"llm_load_print_meta: n_embd_v_gqa = 4096\n",
"llm_load_print_meta: f_norm_eps = 0.0e+00\n",
"llm_load_print_meta: f_norm_rms_eps = 1.0e-06\n",
"llm_load_print_meta: f_clamp_kqv = 0.0e+00\n",
"llm_load_print_meta: f_max_alibi_bias = 0.0e+00\n",
"llm_load_print_meta: f_logit_scale = 0.0e+00\n",
"llm_load_print_meta: n_ff = 11008\n",
"llm_load_print_meta: n_expert = 0\n",
"llm_load_print_meta: n_expert_used = 0\n",
"llm_load_print_meta: causal attn = 1\n",
"llm_load_print_meta: pooling type = 0\n",
"llm_load_print_meta: rope type = 0\n",
"llm_load_print_meta: rope scaling = linear\n",
"llm_load_print_meta: freq_base_train = 10000.0\n",
"llm_load_print_meta: freq_scale_train = 1\n",
"llm_load_print_meta: n_ctx_orig_yarn = 4096\n",
"llm_load_print_meta: rope_finetuned = unknown\n",
"llm_load_print_meta: ssm_d_conv = 0\n",
"llm_load_print_meta: ssm_d_inner = 0\n",
"llm_load_print_meta: ssm_d_state = 0\n",
"llm_load_print_meta: ssm_dt_rank = 0\n",
"llm_load_print_meta: model type = 7B\n",
"llm_load_print_meta: model ftype = Q8_0\n",
"llm_load_print_meta: model params = 6.74 B\n",
"llm_load_print_meta: model size = 6.67 GiB (8.50 BPW) \n",
"llm_load_print_meta: general.name = LLaMA v2\n",
"llm_load_print_meta: BOS token = 1 '<s>'\n",
"llm_load_print_meta: EOS token = 2 '</s>'\n",
"llm_load_print_meta: UNK token = 0 '<unk>'\n",
"llm_load_print_meta: LF token = 13 '<0x0A>'\n",
"llm_load_print_meta: max token length = 48\n",
"llm_load_tensors: ggml ctx size = 0.14 MiB\n",
"llm_load_tensors: CPU buffer size = 6828.64 MiB\n",
"...................................................................................................\n",
"llama_new_context_with_model: n_ctx = 2048\n",
"llama_new_context_with_model: n_batch = 512\n",
"llama_new_context_with_model: n_ubatch = 512\n",
"llama_new_context_with_model: flash_attn = 0\n",
"llama_new_context_with_model: freq_base = 10000.0\n",
"llama_new_context_with_model: freq_scale = 1\n",
"llama_kv_cache_init: CPU KV buffer size = 1024.00 MiB\n",
"llama_new_context_with_model: KV self size = 1024.00 MiB, K (f16): 512.00 MiB, V (f16): 512.00 MiB\n",
"llama_new_context_with_model: CPU output buffer size = 0.12 MiB\n",
"llama_new_context_with_model: CPU compute buffer size = 164.01 MiB\n",
"llama_new_context_with_model: graph nodes = 1030\n",
"llama_new_context_with_model: graph splits = 1\n",
"AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 0 | \n",
"Model metadata: {'tokenizer.ggml.unknown_token_id': '0', 'tokenizer.ggml.eos_token_id': '2', 'general.architecture': 'llama', 'llama.context_length': '4096', 'general.name': 'LLaMA v2', 'llama.embedding_length': '4096', 'llama.feed_forward_length': '11008', 'llama.attention.layer_norm_rms_epsilon': '0.000001', 'llama.rope.dimension_count': '128', 'llama.attention.head_count': '32', 'tokenizer.ggml.bos_token_id': '1', 'llama.block_count': '32', 'llama.attention.head_count_kv': '32', 'general.quantization_version': '2', 'tokenizer.ggml.model': 'llama', 'general.file_type': '7'}\n",
"Using fallback chat format: llama-2\n"
]
}
],
"source": [
"llm = LlamaCpp(\n",
" model_path=\"llama-2-7b-chat.Q8_0.gguf\",\n",
" n_gpu_layers=-1,\n",
" n_batch=512,\n",
" n_ctx=2048,\n",
" f16_kv=True, # MUST set to True, otherwise you will run into problem after a couple of calls\n",
" callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),\n",
" verbose=True,\n",
")"
]
},
{
"cell_type": "markdown",
"id": "43e06f56-ef97-451b-87d9-8465ea442aed",
"metadata": {},
"source": [
"**Now let's ask the same question to Llama model without showing them the earnings release.**"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "1033dd82-5532-437d-a548-27695e109589",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"?\n",
"(NASDAQ:INTC)\n",
"Intel's CCG (Client Computing Group) revenue for Q1 2024 was $9.6 billion, a decrease of 35% from the previous quarter and a decrease of 42% from the same period last year."
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"llama_print_timings: load time = 131.20 ms\n",
"llama_print_timings: sample time = 16.05 ms / 68 runs ( 0.24 ms per token, 4236.76 tokens per second)\n",
"llama_print_timings: prompt eval time = 131.14 ms / 16 tokens ( 8.20 ms per token, 122.01 tokens per second)\n",
"llama_print_timings: eval time = 3225.00 ms / 67 runs ( 48.13 ms per token, 20.78 tokens per second)\n",
"llama_print_timings: total time = 3466.40 ms / 83 tokens\n"
]
},
{
"data": {
"text/plain": [
"\"?\\n(NASDAQ:INTC)\\nIntel's CCG (Client Computing Group) revenue for Q1 2024 was $9.6 billion, a decrease of 35% from the previous quarter and a decrease of 42% from the same period last year.\""
]
},
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm.invoke(question)"
]
},
{
"cell_type": "markdown",
"id": "75f5cb10-746f-4e37-9386-b85a4d2b84ef",
"metadata": {},
"source": [
"**As you can see, model is giving wrong information. Correct asnwer is CCG revenue in Q1 2024 is $7.5B. Now let's apply RAG using the earning release document**"
]
},
{
"cell_type": "markdown",
"id": "0f4150ec-5692-4756-b11a-22feb7ab88ff",
"metadata": {},
"source": [
"**in RAG, we modify the input prompt by adding relevent documents with the question. Here, we use one of the popular RAG prompt**"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "226c14b0-f43e-4a1f-a1e4-04731d467ec4",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['context', 'question'], template=\"You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.\\nQuestion: {question} \\nContext: {context} \\nAnswer:\"))]"
]
},
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain import hub\n",
"\n",
"rag_prompt = hub.pull(\"rlm/rag-prompt\")\n",
"rag_prompt.messages"
]
},
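{
"cell_type": "markdown",
"id": "inspect-filled-prompt-md",
"metadata": {},
"source": [
"**As a quick illustration (assuming `docs` from the earlier similarity search), we can fill the prompt ourselves to see exactly what the LLM will receive.**"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "inspect-filled-prompt-code",
"metadata": {},
"outputs": [],
"source": [
"# Render the RAG prompt with the question and a couple of retrieved chunks.\n",
"example_context = \"\\n\\n\".join(doc.page_content for doc in docs[:2])\n",
"filled_prompt = rag_prompt.invoke({\"question\": question, \"context\": example_context})\n",
"print(filled_prompt.to_string()[:500])"
]
},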
{
"cell_type": "markdown",
"id": "77deb6a0-0950-450a-916a-f2a029676c20",
"metadata": {},
"source": [
"**Appending all retreived documents in a single document**"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "2dbc3327-6ef3-4c1f-8797-0c71964b0921",
"metadata": {},
"outputs": [],
"source": [
"def format_docs(docs):\n",
" return \"\\n\\n\".join(doc.page_content for doc in docs)"
]
},
{
"cell_type": "markdown",
"id": "2e2d9f18-49d0-43a3-bea8-78746ffa86b7",
"metadata": {},
"source": [
"**The last step is to create a chain using langchain tool that will create an e2e pipeline. It will take question and context as an input.**"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "427379c2-51ff-4e0f-8278-a45221363299",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough, RunnablePick\n",
"\n",
"# Chain\n",
"chain = (\n",
" RunnablePassthrough.assign(context=RunnablePick(\"context\") | format_docs)\n",
" | rag_prompt\n",
" | llm\n",
" | StrOutputParser()\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "095d6280-c949-4d00-8e32-8895a82d245f",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Llama.generate: prefix-match hit\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
" Based on the provided context, Intel CCG revenue in Q1 2024 was $7.5 billion up 31%."
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"llama_print_timings: load time = 131.20 ms\n",
"llama_print_timings: sample time = 7.74 ms / 31 runs ( 0.25 ms per token, 4004.13 tokens per second)\n",
"llama_print_timings: prompt eval time = 2529.41 ms / 674 tokens ( 3.75 ms per token, 266.46 tokens per second)\n",
"llama_print_timings: eval time = 1542.94 ms / 30 runs ( 51.43 ms per token, 19.44 tokens per second)\n",
"llama_print_timings: total time = 4123.68 ms / 704 tokens\n"
]
},
{
"data": {
"text/plain": [
"' Based on the provided context, Intel CCG revenue in Q1 2024 was $7.5 billion up 31%.'"
]
},
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke({\"context\": docs, \"question\": question})"
]
},
{
"cell_type": "markdown",
"id": "638364b2-6bd2-4471-9961-d3a1d1b9d4ee",
"metadata": {},
"source": [
"**Now we see the results are correct as it is mentioned in earnings release.** <br>\n",
"**To further automate, we will create a chain that will take input as question and retriever so that we don't need to retrieve documents seperately**"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "4654e5b7-635f-4767-8b31-4c430164cdd5",
"metadata": {},
"outputs": [],
"source": [
"retriever = vectorstore.as_retriever()\n",
"qa_chain = (\n",
" {\"context\": retriever | format_docs, \"question\": RunnablePassthrough()}\n",
" | rag_prompt\n",
" | llm\n",
" | StrOutputParser()\n",
")"
]
},
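{
"cell_type": "markdown",
"id": "retriever-k-sketch-md",
"metadata": {},
"source": [
"**Optional, illustrative tweak: you can control how many chunks the retriever returns via `search_kwargs`.**"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "retriever-k-sketch-code",
"metadata": {},
"outputs": [],
"source": [
"# Limit retrieval to the top 2 chunks per question.\n",
"small_retriever = vectorstore.as_retriever(search_kwargs={\"k\": 2})\n",
"print(len(small_retriever.invoke(\"What is Intel CCG revenue in Q1 2024\")))"
]
},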
{
"cell_type": "markdown",
"id": "0979f393-fd0a-4e82-b844-68371c6ad68f",
"metadata": {},
"source": [
"**Now we only need to pass the question to the chain and it will fetch the contexts directly from the vector database to generate the answer**\n",
"<br>\n",
"**Let's try with another question**"
]
},
{
"cell_type": "code",
"execution_count": 26,
"id": "3ea07b82-e6ec-4084-85f4-191373530172",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Llama.generate: prefix-match hit\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
" According to the provided context, Intel DCAI revenue in Q1 2024 was $3.0 billion up 5%."
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"llama_print_timings: load time = 131.20 ms\n",
"llama_print_timings: sample time = 6.28 ms / 31 runs ( 0.20 ms per token, 4937.88 tokens per second)\n",
"llama_print_timings: prompt eval time = 2681.93 ms / 730 tokens ( 3.67 ms per token, 272.19 tokens per second)\n",
"llama_print_timings: eval time = 1471.07 ms / 30 runs ( 49.04 ms per token, 20.39 tokens per second)\n",
"llama_print_timings: total time = 4206.77 ms / 760 tokens\n"
]
},
{
"data": {
"text/plain": [
"' According to the provided context, Intel DCAI revenue in Q1 2024 was $3.0 billion up 5%.'"
]
},
"execution_count": 26,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"qa_chain.invoke(\"what is Intel DCAI revenue in Q1 2024?\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9407f2a0-4a35-4315-8e96-02fcb80f210c",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "rag-on-intel",
"language": "python",
"name": "rag-on-intel"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -36,10 +36,10 @@
"from bs4 import BeautifulSoup as Soup\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.storage import InMemoryByteStore, LocalFileStore\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.document_loaders.recursive_url_loader import (\n",
" RecursiveUrlLoader,\n",
")\n",
"from langchain_community.vectorstores import Chroma\n",
"\n",
"# For our example, we'll load docs from the web\n",
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
@@ -370,14 +370,13 @@
],
"source": [
"import torch\n",
"from langchain_huggingface.llms import HuggingFacePipeline\n",
"from optimum.intel.ipex import IPEXModelForCausalLM\n",
"from transformers import AutoTokenizer, pipeline\n",
"from langchain.llms.huggingface_pipeline import HuggingFacePipeline\n",
"from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline\n",
"\n",
"model_id = \"Intel/neural-chat-7b-v3-3\"\n",
"tokenizer = AutoTokenizer.from_pretrained(model_id)\n",
"model = IPEXModelForCausalLM.from_pretrained(\n",
" model_id, torch_dtype=torch.bfloat16, export=True\n",
"model = AutoModelForCausalLM.from_pretrained(\n",
" model_id, device_map=\"auto\", torch_dtype=torch.bfloat16\n",
")\n",
"\n",
"pipe = pipeline(\"text-generation\", model=model, tokenizer=tokenizer, max_new_tokens=100)\n",
@@ -582,7 +581,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.14"
"version": "3.9.18"
}
},
"nbformat": 4,

View File

@@ -740,7 +740,7 @@ Even this relatively large model will most likely fail to generate more complica
```bash
poetry run pip install pyyaml langchain_chroma
poetry run pip install pyyaml chromadb
import yaml
```
@@ -994,7 +994,7 @@ from langchain.prompts import FewShotPromptTemplate, PromptTemplate
from langchain.chains.sql_database.prompt import _sqlite_prompt, PROMPT_SUFFIX
from langchain_huggingface import HuggingFaceEmbeddings
from langchain.prompts.example_selector.semantic_similarity import SemanticSimilarityExampleSelector
from langchain_chroma import Chroma
from langchain_community.vectorstores import Chroma
example_prompt = PromptTemplate(
input_variables=["table_info", "input", "sql_cmd", "sql_result", "answer"],

View File

@@ -22,7 +22,7 @@
"metadata": {},
"outputs": [],
"source": [
"! pip install --quiet pypdf tiktoken openai langchain-chroma langchain-together"
"! pip install --quiet pypdf chromadb tiktoken openai langchain-together"
]
},
{
@@ -45,8 +45,8 @@
"all_splits = text_splitter.split_documents(data)\n",
"\n",
"# Add to vectorDB\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import Chroma\n",
"\n",
"\"\"\"\n",
"from langchain_together.embeddings import TogetherEmbeddings\n",

File diff suppressed because one or more lines are too long

View File

@@ -13,12 +13,7 @@ OUTPUT_NEW_DOCS_DIR = $(OUTPUT_NEW_DIR)/docs
PYTHON = .venv/bin/python
PARTNER_DEPS_LIST := $(shell find ../libs/partners -mindepth 1 -maxdepth 1 -type d -exec sh -c ' \
for dir; do \
if find "$$dir" -maxdepth 1 -type f \( -name "pyproject.toml" -o -name "setup.py" \) | grep -q .; then \
echo "$$dir"; \
fi \
done' sh {} + | grep -vE "airbyte|ibm|couchbase" | tr '\n' ' ')
PARTNER_DEPS_LIST := $(shell find ../libs/partners -mindepth 1 -maxdepth 1 -type d -exec test -e "{}/pyproject.toml" \; -print | grep -vE "airbyte|ibm" | tr '\n' ' ')
PORT ?= 3001
@@ -41,11 +36,9 @@ generate-files:
cp -r $(SOURCE_DIR)/* $(INTERMEDIATE_DIR)
mkdir -p $(INTERMEDIATE_DIR)/templates
$(PYTHON) scripts/tool_feat_table.py $(INTERMEDIATE_DIR)
$(PYTHON) scripts/model_feat_table.py $(INTERMEDIATE_DIR)
$(PYTHON) scripts/kv_store_feat_table.py $(INTERMEDIATE_DIR)
$(PYTHON) scripts/partner_pkg_table.py $(INTERMEDIATE_DIR)
$(PYTHON) scripts/document_loader_feat_table.py $(INTERMEDIATE_DIR)
$(PYTHON) scripts/copy_templates.py $(INTERMEDIATE_DIR)
@@ -70,23 +63,16 @@ render:
md-sync:
rsync -avm --include="*/" --include="*.mdx" --include="*.md" --include="*.png" --include="*/_category_.yml" --exclude="*" $(INTERMEDIATE_DIR)/ $(OUTPUT_NEW_DOCS_DIR)
append-related:
$(PYTHON) scripts/append_related_links.py $(OUTPUT_NEW_DOCS_DIR)
generate-references:
$(PYTHON) scripts/generate_api_reference_links.py --docs_dir $(OUTPUT_NEW_DOCS_DIR)
build: install-py-deps generate-files copy-infra render md-sync append-related
build: install-py-deps generate-files copy-infra render md-sync
vercel-build: install-vercel-deps build generate-references
rm -rf docs
mv $(OUTPUT_NEW_DOCS_DIR) docs
rm -rf build
mkdir static/api_reference
git clone --depth=1 https://github.com/baskaryan/langchain-api-docs-build.git
mv langchain-api-docs-build/api_reference_build/html/* static/api_reference/
rm -rf langchain-api-docs-build
NODE_OPTIONS="--max-old-space-size=5000" yarn run docusaurus build
yarn run docusaurus build
mv build v0.2
mkdir build
mv v0.2 build

View File

@@ -1,144 +0,0 @@
"""A directive to generate a gallery of images from structured data.
Generating a gallery of images that are all the same size is a common
pattern in documentation, and this can be cumbersome if the gallery is
generated programmatically. This directive wraps this particular use-case
in a helper-directive to generate it with a single YAML configuration file.
It currently exists for maintainers of the pydata-sphinx-theme,
but might be abstracted into a standalone package if it proves useful.
"""
from pathlib import Path
from typing import Any, ClassVar, Dict, List
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.application import Sphinx
from sphinx.util import logging
from sphinx.util.docutils import SphinxDirective
from yaml import safe_load
logger = logging.getLogger(__name__)
TEMPLATE_GRID = """
`````{{grid}} {columns}
{options}
{content}
`````
"""
GRID_CARD = """
````{{grid-item-card}} {title}
{options}
{content}
````
"""
class GalleryGridDirective(SphinxDirective):
"""A directive to show a gallery of images and links in a Bootstrap grid.
The grid can be generated from a YAML file that contains a list of items, or
from the content of the directive (also formatted in YAML). Use the parameter
"class-card" to add an additional CSS class to all cards. When specifying the grid
items, you can use all parameters from "grid-item-card" directive to customize
individual cards + ["image", "header", "content", "title"].
Danger:
This directive can only be used in the context of a Myst documentation page as
the templates use Markdown flavored formatting.
"""
name = "gallery-grid"
has_content = True
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = True
option_spec: ClassVar[dict[str, Any]] = {
# A class to be added to the resulting container
"grid-columns": directives.unchanged,
"class-container": directives.unchanged,
"class-card": directives.unchanged,
}
def run(self) -> List[nodes.Node]:
"""Create the gallery grid."""
if self.arguments:
# If an argument is given, assume it's a path to a YAML file
# Parse it and load it into the directive content
path_data_rel = Path(self.arguments[0])
path_doc, _ = self.get_source_info()
path_doc = Path(path_doc).parent
path_data = (path_doc / path_data_rel).resolve()
if not path_data.exists():
logger.info(f"Could not find grid data at {path_data}.")
nodes.text("No grid data found at {path_data}.")
return
yaml_string = path_data.read_text()
else:
yaml_string = "\n".join(self.content)
# Use all the element with an img-bottom key as sites to show
# and generate a card item for each of them
grid_items = []
for item in safe_load(yaml_string):
# remove parameters that are not needed for the card options
title = item.pop("title", "")
# build the content of the card using some extra parameters
header = f"{item.pop('header')} \n^^^ \n" if "header" in item else ""
image = f"![image]({item.pop('image')}) \n" if "image" in item else ""
content = f"{item.pop('content')} \n" if "content" in item else ""
# optional parameter that influence all cards
if "class-card" in self.options:
item["class-card"] = self.options["class-card"]
loc_options_str = "\n".join(f":{k}: {v}" for k, v in item.items()) + " \n"
card = GRID_CARD.format(
options=loc_options_str, content=header + image + content, title=title
)
grid_items.append(card)
# Parse the template with Sphinx Design to create an output container
# Prep the options for the template grid
class_ = "gallery-directive" + f' {self.options.get("class-container", "")}'
options = {"gutter": 2, "class-container": class_}
options_str = "\n".join(f":{k}: {v}" for k, v in options.items())
# Create the directive string for the grid
grid_directive = TEMPLATE_GRID.format(
columns=self.options.get("grid-columns", "1 2 3 4"),
options=options_str,
content="\n".join(grid_items),
)
# Parse content as a directive so Sphinx Design processes it
container = nodes.container()
self.state.nested_parse([grid_directive], 0, container)
# Sphinx Design outputs a container too, so just use that
return [container.children[0]]
def setup(app: Sphinx) -> Dict[str, Any]:
"""Add custom configuration to sphinx app.
Args:
app: the Sphinx application
Returns:
the 2 parallel parameters set to ``True``.
"""
app.add_directive("gallery-grid", GalleryGridDirective)
return {
"parallel_read_safe": True,
"parallel_write_safe": True,
}

View File

@@ -1,411 +1,26 @@
@import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;700&display=swap');
/*******************************************************************************
* master color map. Only the colors that actually differ between light and dark
* themes are specified separately.
*
* To see the full list of colors see https://www.figma.com/file/rUrrHGhUBBIAAjQ82x6pz9/PyData-Design-system---proposal-for-implementation-(2)?node-id=1234%3A765&t=ifcFT1JtnrSshGfi-1
*/
/**
* Function to get items from nested maps
*/
/* Assign base colors for the PyData theme */
:root {
--pst-teal-50: #f4fbfc;
--pst-teal-100: #e9f6f8;
--pst-teal-200: #d0ecf1;
--pst-teal-300: #abdde6;
--pst-teal-400: #3fb1c5;
--pst-teal-500: #0a7d91;
--pst-teal-600: #085d6c;
--pst-teal-700: #064752;
--pst-teal-800: #042c33;
--pst-teal-900: #021b1f;
--pst-violet-50: #f4eefb;
--pst-violet-100: #e0c7ff;
--pst-violet-200: #d5b4fd;
--pst-violet-300: #b780ff;
--pst-violet-400: #9c5ffd;
--pst-violet-500: #8045e5;
--pst-violet-600: #6432bd;
--pst-violet-700: #4b258f;
--pst-violet-800: #341a61;
--pst-violet-900: #1e0e39;
--pst-gray-50: #f9f9fa;
--pst-gray-100: #f3f4f5;
--pst-gray-200: #e5e7ea;
--pst-gray-300: #d1d5da;
--pst-gray-400: #9ca4af;
--pst-gray-500: #677384;
--pst-gray-600: #48566b;
--pst-gray-700: #29313d;
--pst-gray-800: #222832;
--pst-gray-900: #14181e;
--pst-pink-50: #fcf8fd;
--pst-pink-100: #fcf0fa;
--pst-pink-200: #f8dff5;
--pst-pink-300: #f3c7ee;
--pst-pink-400: #e47fd7;
--pst-pink-500: #c132af;
--pst-pink-600: #912583;
--pst-pink-700: #6e1c64;
--pst-pink-800: #46123f;
--pst-pink-900: #2b0b27;
--pst-foundation-white: #ffffff;
--pst-foundation-black: #14181e;
--pst-green-10: #f1fdfd;
--pst-green-50: #E0F7F6;
--pst-green-100: #B3E8E6;
--pst-green-200: #80D6D3;
--pst-green-300: #4DC4C0;
--pst-green-400: #4FB2AD;
--pst-green-500: #287977;
--pst-green-600: #246161;
--pst-green-700: #204F4F;
--pst-green-800: #1C3C3C;
--pst-green-900: #0D2427;
--pst-lilac-50: #f4eefb;
--pst-lilac-100: #DAD6FE;
--pst-lilac-200: #BCB2FD;
--pst-lilac-300: #9F8BFA;
--pst-lilac-400: #7F5CF6;
--pst-lilac-500: #6F3AED;
--pst-lilac-600: #6028D9;
--pst-lilac-700: #5021B6;
--pst-lilac-800: #431D95;
--pst-lilac-900: #1e0e39;
--pst-header-height: 2.5rem;
pre {
white-space: break-spaces;
}
html {
--pst-font-family-base: 'Inter';
--pst-font-family-heading: 'Inter Tight', sans-serif;
@media (min-width: 1200px) {
.container,
.container-lg,
.container-md,
.container-sm,
.container-xl {
max-width: 2560px !important;
}
}
/*******************************************************************************
* write the color rules for each theme (light/dark)
*/
/* NOTE:
* Mixins enable us to reuse the same definitions for the different modes
* https://sass-lang.com/documentation/at-rules/mixin
* something inserts a variable into a CSS selector or property name
* https://sass-lang.com/documentation/interpolation
*/
/* Defaults to light mode if data-theme is not set */
html:not([data-theme]) {
--pst-color-primary: #287977;
--pst-color-primary-bg: #80D6D3;
--pst-color-secondary: #6F3AED;
--pst-color-secondary-bg: #DAD6FE;
--pst-color-accent: #c132af;
--pst-color-accent-bg: #f8dff5;
--pst-color-info: #276be9;
--pst-color-info-bg: #dce7fc;
--pst-color-warning: #f66a0a;
--pst-color-warning-bg: #f8e3d0;
--pst-color-success: #00843f;
--pst-color-success-bg: #d6ece1;
--pst-color-attention: var(--pst-color-warning);
--pst-color-attention-bg: var(--pst-color-warning-bg);
--pst-color-danger: #d72d47;
--pst-color-danger-bg: #f9e1e4;
--pst-color-text-base: #222832;
--pst-color-text-muted: #48566b;
--pst-color-heading-color: #ffffff;
--pst-color-shadow: rgba(0, 0, 0, 0.1);
--pst-color-border: #d1d5da;
--pst-color-border-muted: rgba(23, 23, 26, 0.2);
--pst-color-inline-code: #912583;
--pst-color-inline-code-links: #246161;
--pst-color-target: #f3cf95;
--pst-color-background: #ffffff;
--pst-color-on-background: #F4F9F8;
--pst-color-surface: #F4F9F8;
--pst-color-on-surface: #222832;
}
html:not([data-theme]) {
--pst-color-link: var(--pst-color-primary);
--pst-color-link-hover: var(--pst-color-secondary);
}
html:not([data-theme]) .only-dark,
html:not([data-theme]) .only-dark ~ figcaption {
display: none !important;
#my-component-root *,
#headlessui-portal-root * {
z-index: 10000;
}
/* NOTE: @each {...} is like a for-loop
* https://sass-lang.com/documentation/at-rules/control/each
*/
html[data-theme=light] {
--pst-color-primary: #287977;
--pst-color-primary-bg: #80D6D3;
--pst-color-secondary: #6F3AED;
--pst-color-secondary-bg: #DAD6FE;
--pst-color-accent: #c132af;
--pst-color-accent-bg: #f8dff5;
--pst-color-info: #276be9;
--pst-color-info-bg: #dce7fc;
--pst-color-warning: #f66a0a;
--pst-color-warning-bg: #f8e3d0;
--pst-color-success: #00843f;
--pst-color-success-bg: #d6ece1;
--pst-color-attention: var(--pst-color-warning);
--pst-color-attention-bg: var(--pst-color-warning-bg);
--pst-color-danger: #d72d47;
--pst-color-danger-bg: #f9e1e4;
--pst-color-text-base: #222832;
--pst-color-text-muted: #48566b;
--pst-color-heading-color: #ffffff;
--pst-color-shadow: rgba(0, 0, 0, 0.1);
--pst-color-border: #d1d5da;
--pst-color-border-muted: rgba(23, 23, 26, 0.2);
--pst-color-inline-code: #912583;
--pst-color-inline-code-links: #246161;
--pst-color-target: #f3cf95;
--pst-color-background: #ffffff;
--pst-color-on-background: #F4F9F8;
--pst-color-surface: #F4F9F8;
--pst-color-on-surface: #222832;
color-scheme: light;
}
html[data-theme=light] {
--pst-color-link: var(--pst-color-primary);
--pst-color-link-hover: var(--pst-color-secondary);
}
html[data-theme=light] .only-dark,
html[data-theme=light] .only-dark ~ figcaption {
display: none !important;
table.longtable code {
white-space: normal;
}
html[data-theme=dark] {
--pst-color-primary: #4FB2AD;
--pst-color-primary-bg: #1C3C3C;
--pst-color-secondary: #7F5CF6;
--pst-color-secondary-bg: #431D95;
--pst-color-accent: #e47fd7;
--pst-color-accent-bg: #46123f;
--pst-color-info: #79a3f2;
--pst-color-info-bg: #06245d;
--pst-color-warning: #ff9245;
--pst-color-warning-bg: #652a02;
--pst-color-success: #5fb488;
--pst-color-success-bg: #002f17;
--pst-color-attention: var(--pst-color-warning);
--pst-color-attention-bg: var(--pst-color-warning-bg);
--pst-color-danger: #e78894;
--pst-color-danger-bg: #4e111b;
--pst-color-text-base: #ced6dd;
--pst-color-text-muted: #9ca4af;
--pst-color-heading-color: #14181e;
--pst-color-shadow: rgba(0, 0, 0, 0.2);
--pst-color-border: #48566b;
--pst-color-border-muted: #29313d;
--pst-color-inline-code: #f3c7ee;
--pst-color-inline-code-links: #4FB2AD;
--pst-color-target: #675c04;
--pst-color-background: #14181e;
--pst-color-on-background: #222832;
--pst-color-surface: #29313d;
--pst-color-on-surface: #f3f4f5;
/* Adjust images in dark mode (unless they have class .only-dark or
* .dark-light, in which case assume they're already optimized for dark
* mode).
*/
/* Give images a light background in dark mode in case they have
* transparency and black text (unless they have class .only-dark or .dark-light, in
* which case assume they're already optimized for dark mode).
*/
color-scheme: dark;
table.longtable td {
max-width: 600px;
}
html[data-theme=dark] {
--pst-color-link: var(--pst-color-primary);
--pst-color-link-hover: var(--pst-color-secondary);
}
html[data-theme=dark] .only-light,
html[data-theme=dark] .only-light ~ figcaption {
display: none !important;
}
html[data-theme=dark] img:not(.only-dark):not(.dark-light) {
filter: brightness(0.8) contrast(1.2);
}
html[data-theme=dark] .bd-content img:not(.only-dark):not(.dark-light) {
background: rgb(255, 255, 255);
border-radius: 0.25rem;
}
html[data-theme=dark] .MathJax_SVG * {
fill: var(--pst-color-text-base);
}
.pst-color-primary {
color: var(--pst-color-primary);
}
.pst-color-secondary {
color: var(--pst-color-secondary);
}
.pst-color-accent {
color: var(--pst-color-accent);
}
.pst-color-info {
color: var(--pst-color-info);
}
.pst-color-warning {
color: var(--pst-color-warning);
}
.pst-color-success {
color: var(--pst-color-success);
}
.pst-color-attention {
color: var(--pst-color-attention);
}
.pst-color-danger {
color: var(--pst-color-danger);
}
.pst-color-text-base {
color: var(--pst-color-text-base);
}
.pst-color-text-muted {
color: var(--pst-color-text-muted);
}
.pst-color-heading-color {
color: var(--pst-color-heading-color);
}
.pst-color-shadow {
color: var(--pst-color-shadow);
}
.pst-color-border {
color: var(--pst-color-border);
}
.pst-color-border-muted {
color: var(--pst-color-border-muted);
}
.pst-color-inline-code {
color: var(--pst-color-inline-code);
}
.pst-color-inline-code-links {
color: var(--pst-color-inline-code-links);
}
.pst-color-target {
color: var(--pst-color-target);
}
.pst-color-background {
color: var(--pst-color-background);
}
.pst-color-on-background {
color: var(--pst-color-on-background);
}
.pst-color-surface {
color: var(--pst-color-surface);
}
.pst-color-on-surface {
color: var(--pst-color-on-surface);
}
/* Adjust the height of the navbar */
.bd-header .bd-header__inner{
height: 52px; /* Adjust this value as needed */
}
.navbar-nav > li > a {
line-height: 52px; /* Vertically center the navbar links */
}
/* Make sure the navbar items align properly */
.navbar-nav {
display: flex;
}
.bd-header .navbar-header-items__start{
margin-left: 0rem
}
.bd-header button.primary-toggle {
margin-right: 0rem;
}
.bd-header ul.navbar-nav .dropdown .dropdown-menu {
overflow-y: auto; /* Enable vertical scrolling */
max-height: 80vh
}
.bd-sidebar-primary {
width: 22%; /* Adjust this value to your preference */
line-height: 1.4;
}
.bd-sidebar-secondary {
line-height: 1.4;
}
.toc-entry a.nav-link, .toc-entry a>code {
background-color: transparent;
border-color: transparent;
}
.bd-sidebar-primary code{
background-color: transparent;
border-color: transparent;
}
.toctree-wrapper li[class^=toctree-l1]>a {
font-size: 1.3em
}
.toctree-wrapper li[class^=toctree-l1] {
margin-bottom: 2em;
}
.toctree-wrapper li[class^=toctree-l]>ul {
margin-top: 0.5em;
font-size: 0.9em;
}
*, :after, :before {
font-style: normal;
}
div.deprecated {
margin-top: 0.5em;
margin-bottom: 2em;
}
.admonition-beta.admonition, div.admonition-beta.admonition {
border-color: var(--pst-color-warning);
margin-top:0.5em;
margin-bottom: 2em;
}
.admonition-beta>.admonition-title, div.admonition-beta>.admonition-title {
background-color: var(--pst-color-warning-bg);
}
dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dd {
margin-left: 1rem;
}
p {
font-size: 0.9rem;
margin-bottom: 0.5rem;
}

Binary file not shown.


View File

@@ -1,11 +0,0 @@
<svg width="72" height="19" viewBox="0 0 72 19" fill="none" xmlns="http://www.w3.org/2000/svg">
<g clip-path="url(#clip0_4019_2020)">
<path d="M29.4038 5.84477C30.1256 6.56657 30.1256 7.74117 29.4038 8.46296L27.7869 10.0538L27.7704 9.96259C27.6524 9.30879 27.3415 8.71552 26.8723 8.24627C26.5189 7.8936 26.1012 7.63282 25.6305 7.47143C25.3383 7.76508 25.1777 8.14989 25.1777 8.55487C25.1777 8.63706 25.1851 8.72224 25.2001 8.80742C25.4593 8.90082 25.6887 9.04503 25.8815 9.23781C26.6033 9.9596 26.6033 11.1342 25.8815 11.856L24.4738 13.2637C24.1129 13.6246 23.6392 13.8047 23.1647 13.8047C22.6902 13.8047 22.2165 13.6246 21.8556 13.2637C21.1338 12.5419 21.1338 11.3673 21.8556 10.6455L23.4725 9.05549L23.489 9.14665C23.6063 9.79896 23.9171 10.3922 24.3879 10.8622C24.742 11.2164 25.1343 11.4518 25.6043 11.6124L25.691 11.5257C25.954 11.2627 26.0982 10.913 26.0982 10.5402C26.0982 10.4572 26.0907 10.3743 26.0765 10.2929C25.8053 10.2032 25.5819 10.0754 25.3786 9.87218C25.0857 9.57928 24.9034 9.20493 24.8526 8.79024C24.8489 8.76035 24.8466 8.73121 24.8437 8.70132C24.8033 8.16109 24.9983 7.63357 25.3786 7.25399L26.7864 5.84627C27.1353 5.49733 27.6001 5.30455 28.0955 5.30455C28.5909 5.30455 29.0556 5.49658 29.4046 5.84627L29.4038 5.84477ZM36.7548 9.56583C36.7548 14.7163 32.5645 18.9058 27.4148 18.9058H9.34C4.1903 18.9058 0 14.7163 0 9.56583C0 4.41538 4.1903 0.22583 9.34 0.22583H27.4148C32.5652 0.22583 36.7548 4.41613 36.7548 9.56583ZM18 14.25C18.1472 14.0714 17.4673 13.5686 17.3283 13.384C17.0459 13.0777 17.0444 12.6368 16.8538 12.2789C16.3876 11.1985 15.8518 10.1262 15.1024 9.21166C14.3104 8.21116 13.333 7.38326 12.4745 6.44403C11.8371 5.78873 11.6668 4.85548 11.1041 4.15087C10.3285 3.00541 7.87624 2.69308 7.51683 4.31077C7.51833 4.36158 7.50264 4.39371 7.45855 4.42584C7.2598 4.57005 7.08271 4.73518 6.93402 4.93468C6.57013 5.44129 6.51409 6.30057 6.96839 6.75561C6.98333 6.51576 6.99155 6.28936 7.18134 6.1175C7.53252 6.41862 8.06304 6.52547 8.47026 6.30057C9.36989 7.585 9.14573 9.36184 9.86005 10.7457C10.0573 11.0729 10.2561 11.4069 10.5094 11.6939C10.7148 12.0137 11.4247 12.391 11.4665 12.6869C11.474 13.195 11.4142 13.7502 11.7475 14.1753C11.9044 14.4936 11.5188 14.8134 11.208 14.7738C10.8045 14.8291 10.3121 14.5026 9.95868 14.7036C9.8339 14.8388 9.58957 14.6894 9.48197 14.8769C9.44461 14.9741 9.24286 15.1108 9.36316 15.2042C9.49691 15.1026 9.62095 14.9965 9.80102 15.057C9.77412 15.2035 9.88994 15.2244 9.98184 15.267C9.97886 15.3663 9.92057 15.468 9.99679 15.5524C10.0857 15.4627 10.1388 15.3357 10.28 15.2983C10.7492 15.9238 11.2267 14.6655 12.2421 15.2318C12.0359 15.2214 11.8528 15.2475 11.7139 15.4172C11.6795 15.4553 11.6503 15.5001 11.7109 15.5494C12.2586 15.196 12.2556 15.6705 12.6112 15.5248C12.8847 15.382 13.1567 15.2035 13.4817 15.2543C13.1657 15.3454 13.153 15.5995 12.9677 15.8139C12.9363 15.8468 12.9213 15.8842 12.9579 15.9387C13.614 15.8834 13.6678 15.6652 14.1975 15.3977C14.5928 15.1564 14.9866 15.7414 15.3288 15.4082C15.4043 15.3357 15.5074 15.3604 15.6008 15.3507C15.4812 14.7133 14.1669 15.4672 14.1878 14.6124C14.6107 14.3247 14.5136 13.7741 14.542 13.3295C15.0284 13.5992 15.5694 13.7561 16.0461 14.0139C16.2867 14.4025 16.6641 14.9158 17.1669 14.8822C17.1804 14.8433 17.1923 14.8089 17.2065 14.7693C17.359 14.7955 17.5547 14.8964 17.6384 14.7036C17.8663 14.9419 18.201 14.93 18.4992 14.8687C18.7196 14.6894 18.0845 14.4338 17.9993 14.2493L18 14.25ZM31.3458 7.15387C31.3458 6.28413 31.0081 5.46744 30.3946 4.85399C29.7812 4.24054 28.9645 3.9028 28.094 3.9028C27.2235 3.9028 26.4068 4.24054 25.7933 4.85399L24.3856 6.26171C24.0569 6.59048 23.8073 6.97678 23.6436 7.40941L23.6339 7.43407L23.6085 7.44154C23.0974 7.5992 22.6469 7.86969 
22.2696 8.24702L20.8618 9.65475C19.5938 10.9235 19.5938 12.9873 20.8618 14.2553C21.4753 14.8687 22.292 15.2064 23.1617 15.2064C24.0314 15.2064 24.8489 14.8687 25.4623 14.2553L26.8701 12.8475C27.1973 12.5203 27.4454 12.1355 27.609 11.7036L27.6188 11.6789L27.6442 11.6707C28.1463 11.5168 28.6095 11.2373 28.9854 10.8622L30.3931 9.4545C31.0066 8.84105 31.3443 8.02436 31.3443 7.15387H31.3458ZM12.8802 13.1972C12.7592 13.6695 12.7196 14.4742 12.1054 14.4974C12.0546 14.7701 12.2944 14.8724 12.5119 14.785C12.7278 14.6856 12.8302 14.8635 12.9026 15.0406C13.2359 15.0891 13.7291 14.9292 13.7477 14.5347C13.2501 14.2478 13.0962 13.7023 12.8795 13.1972H12.8802Z" fill="#F4F3FF"/>
<path d="M43.5142 15.2258L47.1462 3.70583H49.9702L53.6022 15.2258H51.6182L48.3222 4.88983H48.7542L45.4982 15.2258H43.5142ZM45.5382 12.7298V10.9298H51.5862V12.7298H45.5382ZM55.0486 15.2258V3.70583H59.8086C59.9206 3.70583 60.0646 3.71116 60.2406 3.72183C60.4166 3.72716 60.5792 3.74316 60.7286 3.76983C61.3952 3.87116 61.9446 4.0925 62.3766 4.43383C62.8139 4.77516 63.1366 5.20716 63.3446 5.72983C63.5579 6.24716 63.6646 6.82316 63.6646 7.45783C63.6646 8.08716 63.5579 8.66316 63.3446 9.18583C63.1312 9.70316 62.8059 10.1325 62.3686 10.4738C61.9366 10.8152 61.3899 11.0365 60.7286 11.1378C60.5792 11.1592 60.4139 11.1752 60.2326 11.1858C60.0566 11.1965 59.9152 11.2018 59.8086 11.2018H56.9766V15.2258H55.0486ZM56.9766 9.40183H59.7286C59.8352 9.40183 59.9552 9.3965 60.0886 9.38583C60.2219 9.37516 60.3446 9.35383 60.4566 9.32183C60.7766 9.24183 61.0272 9.1005 61.2086 8.89783C61.3952 8.69516 61.5259 8.46583 61.6006 8.20983C61.6806 7.95383 61.7206 7.70316 61.7206 7.45783C61.7206 7.2125 61.6806 6.96183 61.6006 6.70583C61.5259 6.4445 61.3952 6.2125 61.2086 6.00983C61.0272 5.80716 60.7766 5.66583 60.4566 5.58583C60.3446 5.55383 60.2219 5.53516 60.0886 5.52983C59.9552 5.51916 59.8352 5.51383 59.7286 5.51383H56.9766V9.40183ZM65.4273 15.2258V3.70583H67.3553V15.2258H65.4273Z" fill="#F4F3FF"/>
</g>
<defs>
<clipPath id="clip0_4019_2020">
<rect width="71.0711" height="18.68" fill="white" transform="translate(0 0.22583)"/>
</clipPath>
</defs>
</svg>


View File

@@ -1,11 +0,0 @@
<svg width="72" height="20" viewBox="0 0 72 20" fill="none" xmlns="http://www.w3.org/2000/svg">
<g clip-path="url(#clip0_4019_689)">
<path d="M29.4038 5.97905C30.1256 6.70085 30.1256 7.87545 29.4038 8.59724L27.7869 10.188L27.7704 10.0969C27.6524 9.44307 27.3415 8.84979 26.8723 8.38055C26.5189 8.02787 26.1012 7.7671 25.6305 7.60571C25.3383 7.89936 25.1777 8.28416 25.1777 8.68915C25.1777 8.77134 25.1851 8.85652 25.2001 8.9417C25.4593 9.0351 25.6887 9.17931 25.8815 9.37209C26.6033 10.0939 26.6033 11.2685 25.8815 11.9903L24.4738 13.398C24.1129 13.7589 23.6392 13.939 23.1647 13.939C22.6902 13.939 22.2165 13.7589 21.8556 13.398C21.1338 12.6762 21.1338 11.5016 21.8556 10.7798L23.4725 9.18977L23.489 9.28093C23.6063 9.93323 23.9171 10.5265 24.3879 10.9965C24.742 11.3507 25.1343 11.586 25.6043 11.7467L25.691 11.66C25.954 11.397 26.0982 11.0473 26.0982 10.6745C26.0982 10.5915 26.0907 10.5086 26.0765 10.4271C25.8053 10.3375 25.5819 10.2097 25.3786 10.0065C25.0857 9.71356 24.9034 9.33921 24.8526 8.92451C24.8489 8.89463 24.8466 8.86549 24.8437 8.8356C24.8033 8.29537 24.9983 7.76785 25.3786 7.38827L26.7864 5.98055C27.1353 5.6316 27.6001 5.43883 28.0955 5.43883C28.5909 5.43883 29.0556 5.63086 29.4046 5.98055L29.4038 5.97905ZM36.7548 9.70011C36.7548 14.8506 32.5645 19.0401 27.4148 19.0401H9.34C4.1903 19.0401 0 14.8506 0 9.70011C0 4.54966 4.1903 0.360107 9.34 0.360107H27.4148C32.5652 0.360107 36.7548 4.55041 36.7548 9.70011ZM18 14.3843C18.1472 14.2057 17.4673 13.7029 17.3283 13.5183C17.0459 13.2119 17.0444 12.7711 16.8538 12.4132C16.3876 11.3327 15.8518 10.2605 15.1024 9.34594C14.3104 8.34543 13.333 7.51754 12.4745 6.57831C11.8371 5.92301 11.6668 4.98976 11.1041 4.28515C10.3285 3.13969 7.87624 2.82736 7.51683 4.44505C7.51833 4.49586 7.50264 4.52799 7.45855 4.56012C7.2598 4.70433 7.08271 4.86946 6.93402 5.06896C6.57013 5.57556 6.51409 6.43484 6.96839 6.88989C6.98333 6.65004 6.99155 6.42364 7.18134 6.25178C7.53252 6.5529 8.06304 6.65975 8.47026 6.43484C9.36989 7.71928 9.14573 9.49612 9.86005 10.8799C10.0573 11.2072 10.2561 11.5412 10.5094 11.8281C10.7148 12.1479 11.4247 12.5253 11.4665 12.8212C11.474 13.3293 11.4142 13.8844 11.7475 14.3096C11.9044 14.6279 11.5188 14.9477 11.208 14.9081C10.8045 14.9634 10.3121 14.6369 9.95868 14.8379C9.8339 14.9731 9.58957 14.8237 9.48197 15.0112C9.44461 15.1083 9.24286 15.2451 9.36316 15.3385C9.49691 15.2369 9.62095 15.1308 9.80102 15.1913C9.77412 15.3377 9.88994 15.3587 9.98184 15.4012C9.97886 15.5006 9.92057 15.6022 9.99679 15.6867C10.0857 15.597 10.1388 15.47 10.28 15.4326C10.7492 16.058 11.2267 14.7997 12.2421 15.3661C12.0359 15.3557 11.8528 15.3818 11.7139 15.5514C11.6795 15.5895 11.6503 15.6344 11.7109 15.6837C12.2586 15.3303 12.2556 15.8047 12.6112 15.659C12.8847 15.5163 13.1567 15.3377 13.4817 15.3885C13.1657 15.4797 13.153 15.7337 12.9677 15.9482C12.9363 15.9811 12.9213 16.0184 12.9579 16.073C13.614 16.0177 13.6678 15.7995 14.1975 15.532C14.5928 15.2907 14.9866 15.8757 15.3288 15.5425C15.4043 15.47 15.5074 15.4946 15.6008 15.4849C15.4812 14.8476 14.1669 15.6015 14.1878 14.7467C14.6107 14.459 14.5136 13.9083 14.542 13.4638C15.0284 13.7335 15.5694 13.8904 16.0461 14.1482C16.2867 14.5367 16.6641 15.0501 17.1669 15.0164C17.1804 14.9776 17.1923 14.9432 17.2065 14.9036C17.359 14.9298 17.5547 15.0306 17.6384 14.8379C17.8663 15.0762 18.201 15.0643 18.4992 15.003C18.7196 14.8237 18.0845 14.5681 17.9993 14.3836L18 14.3843ZM31.3458 7.28815C31.3458 6.41841 31.0081 5.60172 30.3946 4.98826C29.7812 4.37481 28.9645 4.03708 28.094 4.03708C27.2235 4.03708 26.4068 4.37481 25.7933 4.98826L24.3856 6.39599C24.0569 6.72476 23.8073 7.11106 23.6436 7.54369L23.6339 7.56835L23.6085 7.57582C23.0974 7.73348 22.6469 8.00396 
22.2696 8.3813L20.8618 9.78902C19.5938 11.0578 19.5938 13.1215 20.8618 14.3895C21.4753 15.003 22.292 15.3407 23.1617 15.3407C24.0314 15.3407 24.8489 15.003 25.4623 14.3895L26.8701 12.9818C27.1973 12.6545 27.4454 12.2697 27.609 11.8378L27.6188 11.8132L27.6442 11.805C28.1463 11.651 28.6095 11.3716 28.9854 10.9965L30.3931 9.58878C31.0066 8.97532 31.3443 8.15863 31.3443 7.28815H31.3458ZM12.8802 13.3315C12.7592 13.8037 12.7196 14.6085 12.1054 14.6316C12.0546 14.9044 12.2944 15.0067 12.5119 14.9193C12.7278 14.8199 12.8302 14.9978 12.9026 15.1748C13.2359 15.2234 13.7291 15.0635 13.7477 14.669C13.2501 14.3821 13.0962 13.8366 12.8795 13.3315H12.8802Z" fill="#246161"/>
<path d="M43.5142 15.3601L47.1462 3.84011H49.9702L53.6022 15.3601H51.6182L48.3222 5.02411H48.7542L45.4982 15.3601H43.5142ZM45.5382 12.8641V11.0641H51.5862V12.8641H45.5382ZM55.0486 15.3601V3.84011H59.8086C59.9206 3.84011 60.0646 3.84544 60.2406 3.85611C60.4166 3.86144 60.5792 3.87744 60.7286 3.90411C61.3952 4.00544 61.9446 4.22677 62.3766 4.56811C62.8139 4.90944 63.1366 5.34144 63.3446 5.86411C63.5579 6.38144 63.6646 6.95744 63.6646 7.59211C63.6646 8.22144 63.5579 8.79744 63.3446 9.32011C63.1312 9.83744 62.8059 10.2668 62.3686 10.6081C61.9366 10.9494 61.3899 11.1708 60.7286 11.2721C60.5792 11.2934 60.4139 11.3094 60.2326 11.3201C60.0566 11.3308 59.9152 11.3361 59.8086 11.3361H56.9766V15.3601H55.0486ZM56.9766 9.53611H59.7286C59.8352 9.53611 59.9552 9.53077 60.0886 9.52011C60.2219 9.50944 60.3446 9.48811 60.4566 9.45611C60.7766 9.37611 61.0272 9.23477 61.2086 9.03211C61.3952 8.82944 61.5259 8.60011 61.6006 8.34411C61.6806 8.08811 61.7206 7.83744 61.7206 7.59211C61.7206 7.34677 61.6806 7.09611 61.6006 6.84011C61.5259 6.57877 61.3952 6.34677 61.2086 6.14411C61.0272 5.94144 60.7766 5.80011 60.4566 5.72011C60.3446 5.68811 60.2219 5.66944 60.0886 5.66411C59.9552 5.65344 59.8352 5.64811 59.7286 5.64811H56.9766V9.53611ZM65.4273 15.3601V3.84011H67.3553V15.3601H65.4273Z" fill="#246161"/>
</g>
<defs>
<clipPath id="clip0_4019_689">
<rect width="71.0711" height="18.68" fill="white" transform="translate(0 0.360107)"/>
</clipPath>
</defs>
</svg>


View File

@@ -15,8 +15,6 @@ from pathlib import Path
import toml
from docutils import nodes
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
from docutils.statemachine import StringList
from sphinx.util.docutils import SphinxDirective
# If extensions (or modules to document with autodoc) are in another directory,
@@ -62,41 +60,26 @@ class ExampleLinksDirective(SphinxDirective):
item_node.append(para_node)
list_node.append(item_node)
if list_node.children:
title_node = nodes.rubric()
title_node = nodes.title()
title_node.append(nodes.Text(f"Examples using {class_or_func_name}"))
return [title_node, list_node]
return [list_node]
class Beta(BaseAdmonition):
required_arguments = 0
node_class = nodes.admonition
def run(self):
self.content = self.content or StringList(
[
(
"This feature is in beta. It is actively being worked on, so the "
"API may change."
)
]
)
self.arguments = self.arguments or ["Beta"]
return super().run()
def setup(app):
app.add_directive("example_links", ExampleLinksDirective)
app.add_directive("beta", Beta)
# -- Project information -----------------------------------------------------
project = "🦜🔗 LangChain"
copyright = "2023, LangChain Inc"
author = "LangChain, Inc"
copyright = "2023, LangChain, Inc."
author = "LangChain, Inc."
html_favicon = "_static/img/brand/favicon.png"
version = data["tool"]["poetry"]["version"]
release = version
html_title = project + " " + version
html_last_updated_fmt = "%b %d, %Y"
@@ -112,13 +95,11 @@ extensions = [
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
"sphinxcontrib.autodoc_pydantic",
"IPython.sphinxext.ipython_console_highlighting",
"myst_parser",
"_extensions.gallery_directive",
"sphinx_design",
"sphinx_copybutton",
"sphinx_panels",
"IPython.sphinxext.ipython_console_highlighting",
]
source_suffix = [".rst", ".md"]
source_suffix = [".rst"]
# some autodoc pydantic options are repeated in the actual template.
# potentially user error, but there may be bugs in the sphinx extension
@@ -150,84 +131,23 @@ exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# The theme to use for HTML and HTML Help pages.
html_theme = "pydata_sphinx_theme"
html_theme = "scikit-learn-modern"
html_theme_path = ["themes"]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
# # -- General configuration ------------------------------------------------
"sidebar_includehidden": True,
"use_edit_page_button": False,
# # "analytics": {
# # "plausible_analytics_domain": "scikit-learn.org",
# # "plausible_analytics_url": "https://views.scientific-python.org/js/script.js",
# # },
# # If "prev-next" is included in article_footer_items, then setting show_prev_next
# # to True would repeat prev and next links. See
# # https://github.com/pydata/pydata-sphinx-theme/blob/b731dc230bc26a3d1d1bb039c56c977a9b3d25d8/src/pydata_sphinx_theme/theme/pydata_sphinx_theme/layout.html#L118-L129
"show_prev_next": False,
"search_bar_text": "Search",
"navigation_with_keys": True,
"collapse_navigation": True,
"navigation_depth": 3,
"show_nav_level": 1,
"show_toc_level": 3,
"navbar_align": "left",
"header_links_before_dropdown": 5,
"header_dropdown_text": "Integrations",
"logo": {
"image_light": "_static/wordmark-api.svg",
"image_dark": "_static/wordmark-api-dark.svg",
},
"surface_warnings": True,
# # -- Template placement in theme layouts ----------------------------------
"navbar_start": ["navbar-logo"],
# # Note that the alignment of navbar_center is controlled by navbar_align
"navbar_center": ["navbar-nav"],
"navbar_end": ["langchain_docs", "theme-switcher", "navbar-icon-links"],
# # navbar_persistent is persistent right (even when on mobiles)
"navbar_persistent": ["search-field"],
"article_header_start": ["breadcrumbs"],
"article_header_end": [],
"article_footer_items": [],
"content_footer_items": [],
# # Use html_sidebars that map page patterns to list of sidebar templates
# "primary_sidebar_end": [],
"footer_start": ["copyright"],
"footer_center": [],
"footer_end": [],
# # When specified as a dictionary, the keys should follow glob-style patterns, as in
# # https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-exclude_patterns
# # In particular, "**" specifies the default for all pages
# # Use :html_theme.sidebar_secondary.remove: for file-wide removal
# "secondary_sidebar_items": {"**": ["page-toc", "sourcelink"]},
# "show_version_warning_banner": True,
# "announcement": None,
"icon_links": [
{
# Label for this link
"name": "GitHub",
# URL where the link will redirect
"url": "https://github.com/langchain-ai/langchain", # required
# Icon class (if "type": "fontawesome"), or path to local image (if "type": "local")
"icon": "fa-brands fa-square-github",
# The type of image to be used (see below for details)
"type": "fontawesome",
},
{
"name": "X / Twitter",
"url": "https://twitter.com/langchainai",
"icon": "fab fa-twitter-square",
},
],
"icon_links_label": "Quick Links",
"external_links": [
{"name": "Legacy reference", "url": "https://api.python.langchain.com/"},
],
# redirects dictionary maps from old links to new links
html_additional_pages = {}
redirects = {
"index": "langchain_api_reference",
}
for old_link in redirects:
html_additional_pages[old_link] = "redirects.html"
partners_dir = Path(__file__).parent.parent.parent / "libs/partners"
partners = [
(p.name, p.name.replace("-", "_") + "_api_reference")
for p in partners_dir.iterdir()
]
partners = sorted(partners)
html_context = {
"display_github": True, # Integrate GitHub
@@ -235,6 +155,8 @@ html_context = {
"github_repo": "langchain", # Repo name
"github_version": "master", # Version
"conf_py_path": "/docs/api_reference", # Path in the checkout to the docs root
"redirects": redirects,
"partners": partners,
}
# Add any paths that contain custom static files (such as style sheets) here,
@@ -244,7 +166,9 @@ html_static_path = ["_static"]
# These paths are either relative to html_static_path
# or fully qualified paths (e.g. https://...)
html_css_files = ["css/custom.css"]
html_css_files = [
"css/custom.css",
]
html_use_index = False
myst_enable_extensions = ["colon_fence"]
@@ -254,12 +178,3 @@ autosummary_generate = True
html_copy_source = False
html_show_sourcelink = False
# Set canonical URL from the Read the Docs Domain
html_baseurl = os.environ.get("READTHEDOCS_CANONICAL_URL", "")
# Tell Jinja2 templates the build is running on Read the Docs
if os.environ.get("READTHEDOCS", "") == "True":
html_context["READTHEDOCS"] = True
master_doc = "index"
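
For reference, a minimal standalone sketch (not part of the diff) of the partner-package discovery wired into `html_context` above; the `libs/partners` layout, the `is_dir()` filter, and the printed names are assumptions:

```python
from pathlib import Path

# Hypothetical standalone version of the discovery above, run from the repo root.
partners_dir = Path("libs/partners")
partners = sorted(
    (p.name, p.name.replace("-", "_") + "_api_reference")
    for p in partners_dir.iterdir()
    if p.is_dir()  # assumption: skip stray files next to the package dirs
)

# Each (title, pathname) pair lands in html_context["partners"]; the nav
# template further below iterates it to build the "Partner libs" dropdown,
# while the redirects dict maps old page names onto "redirects.html" stubs.
for title, pathname in partners:
    print(f"{title} -> {pathname}")
```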

View File

@@ -38,8 +38,6 @@ class ClassInfo(TypedDict):
"""The kind of the class."""
is_public: bool
"""Whether the class is public or not."""
is_deprecated: bool
"""Whether the class is deprecated."""
class FunctionInfo(TypedDict):
@@ -51,8 +49,6 @@ class FunctionInfo(TypedDict):
"""The fully qualified name of the function."""
is_public: bool
"""Whether the function is public or not."""
is_deprecated: bool
"""Whether the function is deprecated."""
class ModuleMembers(TypedDict):
@@ -82,7 +78,7 @@ def _load_module_members(module_path: str, namespace: str) -> ModuleMembers:
continue
if inspect.isclass(type_):
# The type of the class is used to select a template
# The clasification of the class is used to select a template
# for the object when rendering the documentation.
# See `templates` directory for defined templates.
# This is a hacky solution to distinguish between different
@@ -125,7 +121,6 @@ def _load_module_members(module_path: str, namespace: str) -> ModuleMembers:
qualified_name=f"{namespace}.{name}",
kind=kind,
is_public=not name.startswith("_"),
is_deprecated=".. deprecated::" in (type_.__doc__ or ""),
)
)
elif inspect.isfunction(type_):
@@ -134,7 +129,6 @@ def _load_module_members(module_path: str, namespace: str) -> ModuleMembers:
name=name,
qualified_name=f"{namespace}.{name}",
is_public=not name.startswith("_"),
is_deprecated=".. deprecated::" in (type_.__doc__ or ""),
)
)
else:
@@ -239,7 +233,7 @@ def _construct_doc(
package_namespace: str,
members_by_namespace: Dict[str, ModuleMembers],
package_version: str,
) -> List[typing.Tuple[str, str]]:
) -> str:
"""Construct the contents of the reference.rst file for the given package.
Args:
@@ -251,62 +245,23 @@ def _construct_doc(
Returns:
The contents of the reference.rst file.
"""
docs = []
index_doc = f"""\
:html_theme.sidebar_secondary.remove:
full_doc = f"""\
=======================
``{package_namespace}`` {package_version}
=======================
.. currentmodule:: {package_namespace}
.. _{package_namespace}:
======================================
{package_namespace.replace('_', '-')}: {package_version}
======================================
.. automodule:: {package_namespace}
:no-members:
:no-inherited-members:
.. toctree::
:hidden:
:maxdepth: 2
"""
index_autosummary = """
"""
namespaces = sorted(members_by_namespace)
for module in namespaces:
index_doc += f" {module}\n"
module_doc = f"""\
.. currentmodule:: {package_namespace}
.. _{package_namespace}_{module}:
"""
_members = members_by_namespace[module]
classes = [
el
for el in _members["classes_"]
if el["is_public"] and not el["is_deprecated"]
]
functions = [
el
for el in _members["functions"]
if el["is_public"] and not el["is_deprecated"]
]
deprecated_classes = [
el for el in _members["classes_"] if el["is_public"] and el["is_deprecated"]
]
deprecated_functions = [
el
for el in _members["functions"]
if el["is_public"] and el["is_deprecated"]
]
classes = [el for el in _members["classes_"] if el["is_public"]]
functions = [el for el in _members["functions"] if el["is_public"]]
if not (classes or functions):
continue
section = f":mod:`{module}`"
section = f":mod:`{package_namespace}.{module}`"
underline = "=" * (len(section) + 1)
module_doc += f"""
full_doc += f"""\
{section}
{underline}
@@ -314,26 +269,16 @@ def _construct_doc(
:no-members:
:no-inherited-members:
"""
index_autosummary += f"""
:ref:`{package_namespace}_{module}`
{'^' * (len(module) + 5)}
"""
if classes:
module_doc += f"""\
**Classes**
full_doc += f"""\
Classes
--------------
.. currentmodule:: {package_namespace}
.. autosummary::
:toctree: {module}
"""
index_autosummary += """
**Classes**
.. autosummary::
"""
for class_ in sorted(classes, key=lambda c: c["qualified_name"]):
@@ -350,22 +295,19 @@ def _construct_doc(
else:
template = "class.rst"
module_doc += f"""\
full_doc += f"""\
:template: {template}
{class_["qualified_name"]}
"""
index_autosummary += f"""
{class_['qualified_name']}
"""
if functions:
_functions = [f["qualified_name"] for f in functions]
fstring = "\n ".join(sorted(_functions))
module_doc += f"""\
**Functions**
full_doc += f"""\
Functions
--------------
.. currentmodule:: {package_namespace}
.. autosummary::
@@ -375,81 +317,7 @@ def _construct_doc(
{fstring}
"""
index_autosummary += f"""
**Functions**
.. autosummary::
{fstring}
"""
if deprecated_classes:
module_doc += f"""\
**Deprecated classes**
.. currentmodule:: {package_namespace}
.. autosummary::
:toctree: {module}
"""
index_autosummary += """
**Deprecated classes**
.. autosummary::
"""
for class_ in sorted(deprecated_classes, key=lambda c: c["qualified_name"]):
if class_["kind"] == "TypedDict":
template = "typeddict.rst"
elif class_["kind"] == "enum":
template = "enum.rst"
elif class_["kind"] == "Pydantic":
template = "pydantic.rst"
elif class_["kind"] == "RunnablePydantic":
template = "runnable_pydantic.rst"
elif class_["kind"] == "RunnableNonPydantic":
template = "runnable_non_pydantic.rst"
else:
template = "class.rst"
module_doc += f"""\
:template: {template}
{class_["qualified_name"]}
"""
index_autosummary += f"""
{class_['qualified_name']}
"""
if deprecated_functions:
_functions = [f["qualified_name"] for f in deprecated_functions]
fstring = "\n ".join(sorted(_functions))
module_doc += f"""\
**Deprecated functions**
.. currentmodule:: {package_namespace}
.. autosummary::
:toctree: {module}
:template: function.rst
{fstring}
"""
index_autosummary += f"""
**Deprecated functions**
.. autosummary::
{fstring}
"""
docs.append((f"{module}.rst", module_doc))
docs.append(("index.rst", index_doc + index_autosummary))
return docs
return full_doc
def _build_rst_file(package_name: str = "langchain") -> None:
@@ -461,14 +329,13 @@ def _build_rst_file(package_name: str = "langchain") -> None:
package_dir = _package_dir(package_name)
package_members = _load_package_modules(package_dir)
package_version = _get_package_version(package_dir)
output_dir = _out_file_path(package_name)
os.mkdir(output_dir)
rsts = _construct_doc(
_package_namespace(package_name), package_members, package_version
)
for name, rst in rsts:
with open(output_dir / name, "w") as f:
f.write(rst)
with open(_out_file_path(package_name), "w") as f:
f.write(
_doc_first_line(package_name)
+ _construct_doc(
_package_namespace(package_name), package_members, package_version
)
)
def _package_namespace(package_name: str) -> str:
@@ -518,117 +385,12 @@ def _get_package_version(package_dir: Path) -> str:
def _out_file_path(package_name: str) -> Path:
"""Return the path to the file containing the documentation."""
return HERE / f"{package_name.replace('-', '_')}"
return HERE / f"{package_name.replace('-', '_')}_api_reference.rst"
def _build_index(dirs: List[str]) -> None:
custom_names = {
"airbyte": "Airbyte",
"aws": "AWS",
"ai21": "AI21",
}
ordered = ["core", "langchain", "text-splitters", "community", "experimental"]
main_ = [dir_ for dir_ in ordered if dir_ in dirs]
integrations = sorted(dir_ for dir_ in dirs if dir_ not in main_)
main_headers = [
" ".join(custom_names.get(x, x.title()) for x in dir_.split("-"))
for dir_ in main_
]
integration_headers = [
" ".join(
custom_names.get(x, x.title().replace("ai", "AI").replace("db", "DB"))
for x in dir_.split("-")
)
for dir_ in integrations
]
main_tree = "\n".join(
f"{header_name}<{dir_.replace('-', '_')}/index>"
for header_name, dir_ in zip(main_headers, main_)
)
main_grid = "\n".join(
f'- header: "**{header_name}**"\n content: "{_package_namespace(dir_).replace("_", "-")}: {_get_package_version(_package_dir(dir_))}"\n link: {dir_.replace("-", "_")}/index.html'
for header_name, dir_ in zip(main_headers, main_)
)
integration_tree = "\n".join(
f"{header_name}<{dir_.replace('-', '_')}/index>"
for header_name, dir_ in zip(integration_headers, integrations)
)
integration_grid = ""
integrations_to_show = [
"openai",
"anthropic",
"google-vertexai",
"aws",
"huggingface",
"mistralai",
]
for header_name, dir_ in sorted(
zip(integration_headers, integrations),
key=lambda h_d: integrations_to_show.index(h_d[1])
if h_d[1] in integrations_to_show
else len(integrations_to_show),
)[: len(integrations_to_show)]:
integration_grid += f'\n- header: "**{header_name}**"\n content: {_package_namespace(dir_).replace("_", "-")} {_get_package_version(_package_dir(dir_))}\n link: {dir_.replace("-", "_")}/index.html'
doc = f"""# LangChain Python API Reference
Welcome to the LangChain Python API reference. This is a reference for all
`langchain-x` packages.
For user guides see [https://python.langchain.com](https://python.langchain.com).
For the legacy API reference hosted on ReadTheDocs see [https://api.python.langchain.com/](https://api.python.langchain.com/).
## Base packages
```{{gallery-grid}}
:grid-columns: "1 2 2 3"
{main_grid}
```
```{{toctree}}
:maxdepth: 2
:hidden:
:caption: Base packages
{main_tree}
```
## Integrations
```{{gallery-grid}}
:grid-columns: "1 2 2 3"
{integration_grid}
```
See the full list of integrations in the Section Navigation.
```{{toctree}}
:maxdepth: 2
:hidden:
:caption: Integrations
{integration_tree}
```
"""
with open(HERE / "reference.md", "w") as f:
f.write(doc)
dummy_index = """\
# API reference
```{toctree}
:maxdepth: 3
:hidden:
Reference<reference>
```
"""
with open(HERE / "index.md", "w") as f:
f.write(dummy_index)
def _doc_first_line(package_name: str) -> str:
"""Return the path to the file containing the documentation."""
return f".. {package_name.replace('-', '_')}_api_reference:\n\n"
def main(dirs: Optional[list] = None) -> None:
@@ -656,8 +418,6 @@ def main(dirs: Optional[list] = None) -> None:
else:
print("Building package:", dir_)
_build_rst_file(package_name=dir_)
_build_index(dirs)
print("API reference files built.")

View File

@@ -0,0 +1,8 @@
=============
LangChain API
=============
.. toctree::
:maxdepth: 2
api_reference.rst

View File

@@ -1,11 +1,17 @@
autodoc_pydantic>=1,<2
sphinx<=7
myst-parser>=3
sphinx-autobuild>=2024
pydata-sphinx-theme>=0.15
toml>=0.10.2
myst-nb>=1.1.1
pyyaml
sphinx-design
sphinx-copybutton
beautifulsoup4
-e libs/experimental
-e libs/langchain
-e libs/core
-e libs/community
pydantic<2
autodoc_pydantic==1.8.0
myst_parser
nbsphinx==0.8.9
sphinx>=5
sphinx-autobuild==2021.3.14
sphinx_rtd_theme==1.0.0
sphinx-typlog-theme==0.8.0
sphinx-panels
toml
myst_nb
sphinx_copybutton
pydata-sphinx-theme==0.13.1

View File

@@ -1,41 +0,0 @@
import sys
from glob import glob
from pathlib import Path
from bs4 import BeautifulSoup
CUR_DIR = Path(__file__).parents[1]
def process_toc_h3_elements(html_content: str) -> str:
"""Update Class.method() TOC headers to just method()."""
# Create a BeautifulSoup object
soup = BeautifulSoup(html_content, "html.parser")
# Find all <li> elements with class "toc-h3"
toc_h3_elements = soup.find_all("li", class_="toc-h3")
# Process each element
for element in toc_h3_elements:
element = element.a.code.span
# Get the text content of the element
content = element.get_text()
# Apply the regex substitution
modified_content = content.split(".")[-1]
# Update the element's content
element.string = modified_content
# Return the modified HTML
return str(soup)
if __name__ == "__main__":
dir = sys.argv[1]
for fn in glob(str(f"{dir.rstrip('/')}/**/*.html"), recursive=True):
with open(fn, "r") as f:
html = f.read()
processed_html = process_toc_h3_elements(html)
with open(fn, "w") as f:
f.write(processed_html)
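
A quick self-contained check (the sample HTML is made up) of the TOC-trimming idea in the removed helper above, which rewrites `Class.method()` entries to just `method()`; the `li.toc-h3 > a > code > span` structure is assumed from the selector chain used in the script:

```python
from bs4 import BeautifulSoup

# One toc-h3 entry roughly as the theme renders it.
html = '<li class="toc-h3"><a href="#x"><code><span>ChatModel.invoke()</span></code></a></li>'
soup = BeautifulSoup(html, "html.parser")
span = soup.find("li", class_="toc-h3").a.code.span
span.string = span.get_text().split(".")[-1]
print(soup)
# <li class="toc-h3"><a href="#x"><code><span>invoke()</span></code></a></li>
```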

View File

@@ -1,4 +1,4 @@
{{ objname }}
:mod:`{{module}}`.{{objname}}
{{ underline }}==============
.. currentmodule:: {{ module }}
@@ -11,7 +11,7 @@
.. autosummary::
{% for item in attributes %}
~{{ item }}
~{{ name }}.{{ item }}
{%- endfor %}
{% endif %}
{% endblock %}
@@ -22,11 +22,11 @@
.. autosummary::
{% for item in methods %}
~{{ item }}
~{{ name }}.{{ item }}
{%- endfor %}
{% for item in methods %}
.. automethod:: {{ item }}
.. automethod:: {{ name }}.{{ item }}
{%- endfor %}
{% endif %}

View File

@@ -1,4 +1,4 @@
{{ objname }}
:mod:`{{module}}`.{{objname}}
{{ underline }}==============
.. currentmodule:: {{ module }}

View File

@@ -1,4 +1,4 @@
{{ objname }}
:mod:`{{module}}`.{{objname}}
{{ underline }}==============
.. currentmodule:: {{ module }}

View File

@@ -1,12 +0,0 @@
<!-- This will display a link to LangChain docs -->
<head>
<style>
.text-link {
text-decoration: none; /* Remove underline */
color: inherit; /* Inherit color from parent element */
}
</style>
</head>
<body>
<a href="https://python.langchain.com/" class='text-link'>Docs</a>
</body>

View File

@@ -1,4 +1,4 @@
{{ objname }}
:mod:`{{module}}`.{{objname}}
{{ underline }}==============
.. currentmodule:: {{ module }}

View File

@@ -1,21 +1,20 @@
{{ objname }}
:mod:`{{module}}`.{{objname}}
{{ underline }}==============
.. NOTE:: {{objname}} implements the standard :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>`. 🏃
.. currentmodule:: {{ module }}
.. autoclass:: {{ objname }}
.. NOTE:: {{objname}} implements the standard :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>`. 🏃
The :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>` has additional methods that are available on runnables, such as :py:meth:`with_types <langchain_core.runnables.base.Runnable.with_types>`, :py:meth:`with_retry <langchain_core.runnables.base.Runnable.with_retry>`, :py:meth:`assign <langchain_core.runnables.base.Runnable.assign>`, :py:meth:`bind <langchain_core.runnables.base.Runnable.bind>`, :py:meth:`get_graph <langchain_core.runnables.base.Runnable.get_graph>`, and more.
{% block attributes %}
{% if attributes %}
.. rubric:: {{ _('Attributes') }}
.. autosummary::
{% for item in attributes %}
~{{ item }}
~{{ name }}.{{ item }}
{%- endfor %}
{% endif %}
{% endblock %}
@@ -26,11 +25,11 @@
.. autosummary::
{% for item in methods %}
~{{ item }}
~{{ name }}.{{ item }}
{%- endfor %}
{% for item in methods %}
.. automethod:: {{ item }}
.. automethod:: {{ name }}.{{ item }}
{%- endfor %}
{% endif %}

View File

@@ -1,6 +1,8 @@
{{ objname }}
:mod:`{{module}}`.{{objname}}
{{ underline }}==============
.. NOTE:: {{objname}} implements the standard :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>`. 🏃
.. currentmodule:: {{ module }}
.. autopydantic_model:: {{ objname }}
@@ -15,10 +17,6 @@
:member-order: groupwise
:show-inheritance: True
:special-members: __call__
:exclude-members: construct, copy, dict, from_orm, parse_file, parse_obj, parse_raw, schema, schema_json, update_forward_refs, validate, json, is_lc_serializable, to_json_not_implemented, lc_secrets, lc_attributes, lc_id, get_lc_namespace, astream_log, transform, atransform, get_output_schema, get_prompts, config_schema, map, pick, pipe, with_listeners, with_alisteners, with_config, with_fallbacks, with_types, with_retry, InputType, OutputType, config_specs, output_schema, get_input_schema, get_graph, get_name, input_schema, name, bind, assign, as_tool
.. NOTE:: {{objname}} implements the standard :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>`. 🏃
The :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>` has additional methods that are available on runnables, such as :py:meth:`with_types <langchain_core.runnables.base.Runnable.with_types>`, :py:meth:`with_retry <langchain_core.runnables.base.Runnable.with_retry>`, :py:meth:`assign <langchain_core.runnables.base.Runnable.assign>`, :py:meth:`bind <langchain_core.runnables.base.Runnable.bind>`, :py:meth:`get_graph <langchain_core.runnables.base.Runnable.get_graph>`, and more.
:exclude-members: construct, copy, dict, from_orm, parse_file, parse_obj, parse_raw, schema, schema_json, update_forward_refs, validate, json, is_lc_serializable, to_json, to_json_not_implemented, lc_secrets, lc_attributes, lc_id, get_lc_namespace, invoke, ainvoke, batch, abatch, batch_as_completed, abatch_as_completed, astream_log, stream, astream, astream_events, transform, atransform, get_output_schema, get_prompts, configurable_fields, configurable_alternatives, config_schema, map, pick, pipe, with_listeners, with_alisteners, with_config, with_fallbacks, with_types, with_retry, InputType, OutputType, config_specs, output_schema, get_input_schema, get_graph, get_name, input_schema, name, bind, assign
.. example_links:: {{ objname }}

View File

@@ -1,4 +1,4 @@
{{ objname }}
:mod:`{{module}}`.{{objname}}
{{ underline }}==============
.. currentmodule:: {{ module }}

View File

@@ -0,0 +1,27 @@
Copyright (c) 2007-2023 The scikit-learn developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -0,0 +1,67 @@
<script>
$(document).ready(function() {
/* Add a [>>>] button on the top-right corner of code samples to hide
* the >>> and ... prompts and the output and thus make the code
* copyable. */
var div = $('.highlight-python .highlight,' +
'.highlight-python3 .highlight,' +
'.highlight-pycon .highlight,' +
'.highlight-default .highlight')
var pre = div.find('pre');
// get the styles from the current theme
pre.parent().parent().css('position', 'relative');
var hide_text = 'Hide prompts and outputs';
var show_text = 'Show prompts and outputs';
// create and add the button to all the code blocks that contain >>>
div.each(function(index) {
var jthis = $(this);
if (jthis.find('.gp').length > 0) {
var button = $('<span class="copybutton">&gt;&gt;&gt;</span>');
button.attr('title', hide_text);
button.data('hidden', 'false');
jthis.prepend(button);
}
// tracebacks (.gt) contain bare text elements that need to be
// wrapped in a span to work with .nextUntil() (see later)
jthis.find('pre:has(.gt)').contents().filter(function() {
return ((this.nodeType == 3) && (this.data.trim().length > 0));
}).wrap('<span>');
});
// define the behavior of the button when it's clicked
$('.copybutton').click(function(e){
e.preventDefault();
var button = $(this);
if (button.data('hidden') === 'false') {
// hide the code output
button.parent().find('.go, .gp, .gt').hide();
button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'hidden');
button.css('text-decoration', 'line-through');
button.attr('title', show_text);
button.data('hidden', 'true');
} else {
// show the code output
button.parent().find('.go, .gp, .gt').show();
button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'visible');
button.css('text-decoration', 'none');
button.attr('title', hide_text);
button.data('hidden', 'false');
}
});
/*** Add permalink buttons next to glossary terms ***/
$('dl.glossary > dt[id]').append(function() {
return ('<a class="headerlink" href="#' +
this.getAttribute('id') +
'" title="Permalink to this term">¶</a>');
});
});
</script>
{%- if pagename != 'index' and pagename != 'documentation' %}
{% if theme_mathjax_path %}
<script id="MathJax-script" async src="{{ theme_mathjax_path }}"></script>
{% endif %}
{%- endif %}

View File

@@ -0,0 +1,132 @@
{# TEMPLATE VAR SETTINGS #}
{%- set url_root = pathto('', 1) %}
{%- if url_root == '#' %}{% set url_root = '' %}{% endif %}
{%- if not embedded and docstitle %}
{%- set titlesuffix = " &mdash; "|safe + docstitle|e %}
{%- else %}
{%- set titlesuffix = "" %}
{%- endif %}
{%- set lang_attr = 'en' %}
<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="{{ lang_attr }}" > <![endif]-->
<!--[if gt IE 8]><!-->
<html class="no-js" lang="{{ lang_attr }}"> <!--<![endif]-->
<head>
<meta charset="utf-8">
{{ metatags }}
<meta name="viewport" content="width=device-width, initial-scale=1.0">
{% block htmltitle %}
<title>{{ title|striptags|e }}{{ titlesuffix }}</title>
{% endblock %}
<link rel="canonical"
href="https://api.python.langchain.com/en/latest/{{ pagename }}.html"/>
{% if favicon_url %}
<link rel="shortcut icon" href="{{ favicon_url|e }}"/>
{% endif %}
<link rel="stylesheet"
href="{{ pathto('_static/css/vendor/bootstrap.min.css', 1) }}"
type="text/css"/>
{%- for css in css_files %}
{%- if css|attr("rel") %}
<link rel="{{ css.rel }}" href="{{ pathto(css.filename, 1) }}"
type="text/css"{% if css.title is not none %}
title="{{ css.title }}"{% endif %} />
{%- else %}
<link rel="stylesheet" href="{{ pathto(css, 1) }}" type="text/css"/>
{%- endif %}
{%- endfor %}
<link rel="stylesheet" href="{{ pathto('_static/' + style, 1) }}" type="text/css"/>
<script id="documentation_options" data-url_root="{{ pathto('', 1) }}"
src="{{ pathto('_static/documentation_options.js', 1) }}"></script>
<script src="{{ pathto('_static/jquery.js', 1) }}"></script>
{%- block extrahead %} {% endblock %}
</head>
<body>
{% include "nav.html" %}
{%- block content %}
<div class="d-flex" id="sk-doc-wrapper">
<input type="checkbox" name="sk-toggle-checkbox" id="sk-toggle-checkbox">
<label id="sk-sidemenu-toggle" class="sk-btn-toggle-toc btn sk-btn-primary"
for="sk-toggle-checkbox">Toggle Menu</label>
<div id="sk-sidebar-wrapper" class="border-right">
<div class="sk-sidebar-toc-wrapper">
{%- if meta and meta['parenttoc']|tobool %}
<div class="sk-sidebar-toc">
{% set nav = get_nav_object(maxdepth=3, collapse=True, numbered=True) %}
<ul>
{% for main_nav_item in nav %}
{% if main_nav_item.active %}
<li>
<a href="{{ main_nav_item.url }}"
class="sk-toc-active">{{ main_nav_item.title }}</a>
</li>
<ul>
{% for nav_item in main_nav_item.children %}
<li>
<a href="{{ nav_item.url }}"
class="{% if nav_item.active %}sk-toc-active{% endif %}">{{ nav_item.title }}</a>
{% if nav_item.children %}
<ul>
{% for inner_child in nav_item.children %}
<li class="sk-toctree-l3">
<a href="{{ inner_child.url }}">{{ inner_child.title }}</a>
</li>
{% endfor %}
</ul>
{% endif %}
</li>
{% endfor %}
</ul>
{% endif %}
{% endfor %}
</ul>
</div>
{%- elif meta and meta['globalsidebartoc']|tobool %}
<div class="sk-sidebar-toc sk-sidebar-global-toc">
{{ toctree(maxdepth=2, titles_only=True) }}
</div>
{%- else %}
<div class="sk-sidebar-toc">
{{ toc }}
</div>
{%- endif %}
</div>
</div>
<div id="sk-page-content-wrapper">
<div class="sk-page-content container-fluid body px-md-3" role="main">
{% block body %}{% endblock %}
</div>
<div class="container">
<footer class="sk-content-footer">
{%- if pagename != 'index' %}
{%- if show_copyright %}
{%- if hasdoc('copyright') %}
{% trans path=pathto('copyright'), copyright=copyright|e %}
&copy; {{ copyright }}.{% endtrans %}
{%- else %}
{% trans copyright=copyright|e %}&copy; {{ copyright }}
.{% endtrans %}
{%- endif %}
{%- endif %}
{%- if last_updated %}
{% trans last_updated=last_updated|e %}Last updated
on {{ last_updated }}.{% endtrans %}
{%- endif %}
{%- if show_source and has_source and sourcename %}
<a href="{{ pathto('_sources/' + sourcename, true)|e }}"
rel="nofollow">{{ _('Show this page source') }}</a>
{%- endif %}
{%- endif %}
</footer>
</div>
</div>
</div>
{%- endblock %}
<script src="{{ pathto('_static/js/vendor/bootstrap.min.js', 1) }}"></script>
{% include "javascript.html" %}
</body>
</html>

View File

@@ -0,0 +1,78 @@
{%- if pagename != 'index' and pagename != 'documentation' %}
{%- set nav_bar_class = "sk-docs-navbar" %}
{%- set top_container_cls = "sk-docs-container" %}
{%- else %}
{%- set nav_bar_class = "sk-landing-navbar" %}
{%- set top_container_cls = "sk-landing-container" %}
{%- endif %}
<nav id="navbar" class="{{ nav_bar_class }} navbar navbar-expand-md navbar-light bg-light py-0">
<div class="container-fluid {{ top_container_cls }} px-0">
{%- if logo_url %}
<a class="navbar-brand py-0" href="{{ pathto('index') }}">
<img
class="sk-brand-img"
src="{{ logo_url|e }}"
alt="logo"/>
</a>
{%- endif %}
<button
id="sk-navbar-toggler"
class="navbar-toggler"
type="button"
data-toggle="collapse"
data-target="#navbarSupportedContent"
aria-controls="navbarSupportedContent"
aria-expanded="false"
aria-label="Toggle navigation"
>
<span class="navbar-toggler-icon"></span>
</button>
<div class="sk-navbar-collapse collapse navbar-collapse" id="navbarSupportedContent">
<ul class="navbar-nav mr-auto">
<li class="nav-item">
<a class="sk-nav-link nav-link" href="{{ pathto('langchain_api_reference') }}">LangChain</a>
</li>
<li class="nav-item">
<a class="sk-nav-link nav-link" href="{{ pathto('core_api_reference') }}">Core</a>
</li>
<li class="nav-item">
<a class="sk-nav-link nav-link" href="{{ pathto('community_api_reference') }}">Community</a>
</li>
<li class="nav-item">
<a class="sk-nav-link nav-link" href="{{ pathto('experimental_api_reference') }}">Experimental</a>
</li>
<li class="nav-item">
<a class="sk-nav-link nav-link" href="{{ pathto('text_splitters_api_reference') }}">Text splitters</a>
</li>
{%- for title, pathname in partners %}
<li class="nav-item">
<a class="sk-nav-link nav-link nav-more-item-mobile-items" href="{{ pathto(pathname) }}">{{ title }}</a>
</li>
{%- endfor %}
<li class="nav-item dropdown nav-more-item-dropdown">
<a class="sk-nav-link nav-link dropdown-toggle" href="#" id="navbarDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Partner libs</a>
<div class="dropdown-menu" aria-labelledby="navbarDropdown">
{%- for title, pathname in partners %}
<a class="sk-nav-dropdown-item dropdown-item" href="{{ pathto(pathname) }}">{{ title }}</a>
{%- endfor %}
</div>
</li>
<li class="nav-item">
<a class="sk-nav-link nav-link" target="_blank" rel="noopener noreferrer" href="https://python.langchain.com/">Docs</a>
</li>
</ul>
{%- if pagename != "search"%}
<div id="searchbox" role="search">
<div class="searchformwrapper">
<form class="search" action="{{ pathto('search') }}" method="get">
<input class="sk-search-text-input" type="text" name="q" aria-labelledby="searchlabel" />
<input class="sk-search-text-btn" type="submit" value="{{ _('Go') }}" />
</form>
</div>
</div>
{%- endif %}
</div>
</div>
</nav>

View File

@@ -0,0 +1,16 @@
{%- extends "basic/search.html" %}
{% block extrahead %}
<script type="text/javascript" src="{{ pathto('_static/underscore.js', 1) }}"></script>
<script type="text/javascript" src="{{ pathto('searchindex.js', 1) }}" defer></script>
<script type="text/javascript" src="{{ pathto('_static/doctools.js', 1) }}"></script>
<script type="text/javascript" src="{{ pathto('_static/language_data.js', 1) }}"></script>
<script type="text/javascript" src="{{ pathto('_static/searchtools.js', 1) }}"></script>
<script type="text/javascript" src="{{ pathto('_static/sphinx_highlight.js', 1) }}"></script>
<script type="text/javascript">
$(document).ready(function() {
if (!Search.out) {
Search.init();
}
});
</script>
{% endblock %}

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,8 @@
[theme]
inherit = basic
pygments_style = default
stylesheet = css/theme.css
[options]
link_to_live_contributing_page = false
mathjax_path =

File diff suppressed because it is too large

View File

@@ -4,11 +4,8 @@ LangChain implements the latest research in the field of Natural Language Proces
This page contains `arXiv` papers referenced in the LangChain Documentation, API Reference,
Templates, and Cookbooks.
From the opposite direction, scientists use `LangChain` in research and reference it in the research papers.
Here you find papers that reference:
- [LangChain](https://arxiv.org/search/?query=langchain&searchtype=all&source=header)
- [LangGraph](https://arxiv.org/search/?query=langgraph&searchtype=all&source=header)
- [LangSmith](https://arxiv.org/search/?query=langsmith&searchtype=all&source=header)
From the opposite direction, scientists use LangChain in research and reference LangChain in the research papers.
Here you find [such papers](https://arxiv.org/search/?query=langchain&searchtype=all&source=header).
## Summary
@@ -26,30 +23,32 @@ Here you find papers that reference:
| `2305.14283v3` [Query Rewriting for Retrieval-Augmented Large Language Models](http://arxiv.org/abs/2305.14283v3) | Xinbei Ma, Yeyun Gong, Pengcheng He, et al. | 2023-05-23 | `Template:` [rewrite-retrieve-read](https://python.langchain.com/docs/templates/rewrite-retrieve-read), `Cookbook:` [rewrite](https://github.com/langchain-ai/langchain/blob/master/cookbook/rewrite.ipynb)
| `2305.08291v1` [Large Language Model Guided Tree-of-Thought](http://arxiv.org/abs/2305.08291v1) | Jieyi Long | 2023-05-15 | `API:` [langchain_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot), `Cookbook:` [tree_of_thought](https://github.com/langchain-ai/langchain/blob/master/cookbook/tree_of_thought.ipynb)
| `2305.04091v3` [Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models](http://arxiv.org/abs/2305.04091v3) | Lei Wang, Wanyu Xu, Yihuai Lan, et al. | 2023-05-06 | `Cookbook:` [plan_and_execute_agent](https://github.com/langchain-ai/langchain/blob/master/cookbook/plan_and_execute_agent.ipynb)
| `2305.02156v1` [Zero-Shot Listwise Document Reranking with a Large Language Model](http://arxiv.org/abs/2305.02156v1) | Xueguang Ma, Xinyu Zhang, Ronak Pradeep, et al. | 2023-05-03 | `API:` [langchain...LLMListwiseRerank](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.document_compressors.listwise_rerank.LLMListwiseRerank.html#langchain.retrievers.document_compressors.listwise_rerank.LLMListwiseRerank)
| `2304.08485v2` [Visual Instruction Tuning](http://arxiv.org/abs/2304.08485v2) | Haotian Liu, Chunyuan Li, Qingyang Wu, et al. | 2023-04-17 | `Cookbook:` [Semi_structured_and_multi_modal_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb), [Semi_structured_multi_modal_RAG_LLaMA2](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb)
| `2304.03442v2` [Generative Agents: Interactive Simulacra of Human Behavior](http://arxiv.org/abs/2304.03442v2) | Joon Sung Park, Joseph C. O'Brien, Carrie J. Cai, et al. | 2023-04-07 | `Cookbook:` [multiagent_bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb), [generative_agents_interactive_simulacra_of_human_behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb)
| `2303.17760v2` [CAMEL: Communicative Agents for "Mind" Exploration of Large Language Model Society](http://arxiv.org/abs/2303.17760v2) | Guohao Li, Hasan Abed Al Kader Hammoud, Hani Itani, et al. | 2023-03-31 | `Cookbook:` [camel_role_playing](https://github.com/langchain-ai/langchain/blob/master/cookbook/camel_role_playing.ipynb)
| `2303.17580v4` [HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face](http://arxiv.org/abs/2303.17580v4) | Yongliang Shen, Kaitao Song, Xu Tan, et al. | 2023-03-30 | `API:` [langchain_experimental.autonomous_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents), `Cookbook:` [hugginggpt](https://github.com/langchain-ai/langchain/blob/master/cookbook/hugginggpt.ipynb)
| `2301.10226v4` [A Watermark for Large Language Models](http://arxiv.org/abs/2301.10226v4) | John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al. | 2023-01-24 | `API:` [langchain_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
| `2303.08774v6` [GPT-4 Technical Report](http://arxiv.org/abs/2303.08774v6) | OpenAI, Josh Achiam, Steven Adler, et al. | 2023-03-15 | `Docs:` [docs/integrations/vectorstores/mongodb_atlas](https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas)
| `2301.10226v4` [A Watermark for Large Language Models](http://arxiv.org/abs/2301.10226v4) | John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al. | 2023-01-24 | `API:` [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
| `2212.10496v1` [Precise Zero-Shot Dense Retrieval without Relevance Labels](http://arxiv.org/abs/2212.10496v1) | Luyu Gao, Xueguang Ma, Jimmy Lin, et al. | 2022-12-20 | `API:` [langchain...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder), `Template:` [hyde](https://python.langchain.com/docs/templates/hyde), `Cookbook:` [hypothetical_document_embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb)
| `2212.07425v3` [Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments](http://arxiv.org/abs/2212.07425v3) | Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al. | 2022-12-12 | `API:` [langchain_experimental.fallacy_removal](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.fallacy_removal)
| `2211.13892v2` [Complementary Explanations for Effective In-Context Learning](http://arxiv.org/abs/2211.13892v2) | Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al. | 2022-11-25 | `API:` [langchain_core...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)
| `2211.10435v2` [PAL: Program-aided Language Models](http://arxiv.org/abs/2211.10435v2) | Luyu Gao, Aman Madaan, Shuyan Zhou, et al. | 2022-11-18 | `API:` [langchain_experimental.pal_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain), [langchain_experimental...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), `Cookbook:` [program_aided_language_model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb)
| `2210.03629v3` [ReAct: Synergizing Reasoning and Acting in Language Models](http://arxiv.org/abs/2210.03629v3) | Shunyu Yao, Jeffrey Zhao, Dian Yu, et al. | 2022-10-06 | `Docs:` [docs/integrations/providers/cohere](https://python.langchain.com/docs/integrations/providers/cohere), [docs/integrations/tools/ionic_shopping](https://python.langchain.com/docs/integrations/tools/ionic_shopping), `API:` [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain), [langchain...create_react_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent)
| `2211.10435v2` [PAL: Program-aided Language Models](http://arxiv.org/abs/2211.10435v2) | Luyu Gao, Aman Madaan, Shuyan Zhou, et al. | 2022-11-18 | `API:` [langchain_experimental...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), [langchain_experimental.pal_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain), `Cookbook:` [program_aided_language_model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb)
| `2210.03629v3` [ReAct: Synergizing Reasoning and Acting in Language Models](http://arxiv.org/abs/2210.03629v3) | Shunyu Yao, Jeffrey Zhao, Dian Yu, et al. | 2022-10-06 | `Docs:` [docs/integrations/providers/cohere](https://python.langchain.com/docs/integrations/providers/cohere), [docs/integrations/chat/huggingface](https://python.langchain.com/docs/integrations/chat/huggingface), [docs/integrations/tools/ionic_shopping](https://python.langchain.com/docs/integrations/tools/ionic_shopping), `API:` [langchain...create_react_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent), [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain)
| `2209.10785v2` [Deep Lake: a Lakehouse for Deep Learning](http://arxiv.org/abs/2209.10785v2) | Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al. | 2022-09-22 | `Docs:` [docs/integrations/providers/activeloop_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake)
| `2205.13147v4` [Matryoshka Representation Learning](http://arxiv.org/abs/2205.13147v4) | Aditya Kusupati, Gantavya Bhatt, Aniket Rege, et al. | 2022-05-26 | `Docs:` [docs/integrations/providers/snowflake](https://python.langchain.com/docs/integrations/providers/snowflake)
| `2205.12654v1` [Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages](http://arxiv.org/abs/2205.12654v1) | Kevin Heffernan, Onur Çelebi, Holger Schwenk | 2022-05-25 | `API:` [langchain_community...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)
| `2204.00498v1` [Evaluating the Text-to-SQL Capabilities of Large Language Models](http://arxiv.org/abs/2204.00498v1) | Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau | 2022-03-15 | `API:` [langchain_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase), [langchain_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL)
| `2202.00666v5` [Locally Typical Sampling](http://arxiv.org/abs/2202.00666v5) | Clara Meister, Tiago Pimentel, Gian Wiher, et al. | 2022-02-01 | `API:` [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
| `2204.00498v1` [Evaluating the Text-to-SQL Capabilities of Large Language Models](http://arxiv.org/abs/2204.00498v1) | Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau | 2022-03-15 | `API:` [langchain_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL), [langchain_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase)
| `2202.00666v5` [Locally Typical Sampling](http://arxiv.org/abs/2202.00666v5) | Clara Meister, Tiago Pimentel, Gian Wiher, et al. | 2022-02-01 | `API:` [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
| `2103.00020v1` [Learning Transferable Visual Models From Natural Language Supervision](http://arxiv.org/abs/2103.00020v1) | Alec Radford, Jong Wook Kim, Chris Hallacy, et al. | 2021-02-26 | `API:` [langchain_experimental.open_clip](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.open_clip)
| `1909.05858v2` [CTRL: A Conditional Transformer Language Model for Controllable Generation](http://arxiv.org/abs/1909.05858v2) | Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al. | 2019-09-11 | `API:` [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
| `1909.05858v2` [CTRL: A Conditional Transformer Language Model for Controllable Generation](http://arxiv.org/abs/1909.05858v2) | Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al. | 2019-09-11 | `API:` [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
| `1908.10084v1` [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](http://arxiv.org/abs/1908.10084v1) | Nils Reimers, Iryna Gurevych | 2019-08-27 | `Docs:` [docs/integrations/text_embedding/sentence_transformers](https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers)
## Self-Discover: Large Language Models Self-Compose Reasoning Structures
- **arXiv id:** [2402.03620v1](http://arxiv.org/abs/2402.03620v1) **Published Date:** 2024-02-06
- **arXiv id:** 2402.03620v1
- **Title:** Self-Discover: Large Language Models Self-Compose Reasoning Structures
- **Authors:** Pei Zhou, Jay Pujara, Xiang Ren, et al.
- **Published Date:** 2024-02-06
- **URL:** http://arxiv.org/abs/2402.03620v1
- **LangChain:**
- **Cookbook:** [self-discover](https://github.com/langchain-ai/langchain/blob/master/cookbook/self-discover.ipynb)
@@ -71,9 +70,11 @@ commonalities with human reasoning patterns.
## RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval
- **arXiv id:** [2401.18059v1](http://arxiv.org/abs/2401.18059v1) **Published Date:** 2024-01-31
- **arXiv id:** 2401.18059v1
- **Title:** RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval
- **Authors:** Parth Sarthi, Salman Abdullah, Aditi Tuli, et al.
- **Published Date:** 2024-01-31
- **URL:** http://arxiv.org/abs/2401.18059v1
- **LangChain:**
- **Cookbook:** [RAPTOR](https://github.com/langchain-ai/langchain/blob/master/cookbook/RAPTOR.ipynb)
@@ -95,9 +96,11 @@ benchmark by 20% in absolute accuracy.
## Corrective Retrieval Augmented Generation
- **arXiv id:** [2401.15884v2](http://arxiv.org/abs/2401.15884v2) **Published Date:** 2024-01-29
- **arXiv id:** 2401.15884v2
- **Title:** Corrective Retrieval Augmented Generation
- **Authors:** Shi-Qi Yan, Jia-Chen Gu, Yun Zhu, et al.
- **Published Date:** 2024-01-29
- **URL:** http://arxiv.org/abs/2401.15884v2
- **LangChain:**
- **Cookbook:** [langgraph_crag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_crag.ipynb)
@@ -123,9 +126,11 @@ performance of RAG-based approaches.
## Mixtral of Experts
- **arXiv id:** [2401.04088v1](http://arxiv.org/abs/2401.04088v1) **Published Date:** 2024-01-08
- **arXiv id:** 2401.04088v1
- **Title:** Mixtral of Experts
- **Authors:** Albert Q. Jiang, Alexandre Sablayrolles, Antoine Roux, et al.
- **Published Date:** 2024-01-08
- **URL:** http://arxiv.org/abs/2401.04088v1
- **LangChain:**
- **Cookbook:** [together_ai](https://github.com/langchain-ai/langchain/blob/master/cookbook/together_ai.ipynb)
@@ -147,9 +152,11 @@ the base and instruct models are released under the Apache 2.0 license.
## Dense X Retrieval: What Retrieval Granularity Should We Use?
- **arXiv id:** [2312.06648v2](http://arxiv.org/abs/2312.06648v2) **Published Date:** 2023-12-11
- **arXiv id:** 2312.06648v2
- **Title:** Dense X Retrieval: What Retrieval Granularity Should We Use?
- **Authors:** Tong Chen, Hongwei Wang, Sihao Chen, et al.
- **Published Date:** 2023-12-11
- **URL:** http://arxiv.org/abs/2312.06648v2
- **LangChain:**
- **Template:** [propositional-retrieval](https://python.langchain.com/docs/templates/propositional-retrieval)
@@ -174,9 +181,11 @@ information.
## Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models
- **arXiv id:** [2311.09210v1](http://arxiv.org/abs/2311.09210v1) **Published Date:** 2023-11-15
- **arXiv id:** 2311.09210v1
- **Title:** Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models
- **Authors:** Wenhao Yu, Hongming Zhang, Xiaoman Pan, et al.
- **Published Date:** 2023-11-15
- **URL:** http://arxiv.org/abs/2311.09210v1
- **LangChain:**
- **Template:** [chain-of-note-wiki](https://python.langchain.com/docs/templates/chain-of-note-wiki)
@@ -206,9 +215,11 @@ outside the pre-training knowledge scope.
## Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection
- **arXiv id:** [2310.11511v1](http://arxiv.org/abs/2310.11511v1) **Published Date:** 2023-10-17
- **arXiv id:** 2310.11511v1
- **Title:** Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection
- **Authors:** Akari Asai, Zeqiu Wu, Yizhong Wang, et al.
- **Published Date:** 2023-10-17
- **URL:** http://arxiv.org/abs/2310.11511v1
- **LangChain:**
- **Cookbook:** [langgraph_self_rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_self_rag.ipynb)
@@ -237,9 +248,11 @@ to these models.
## Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models
- **arXiv id:** [2310.06117v2](http://arxiv.org/abs/2310.06117v2) **Published Date:** 2023-10-09
- **arXiv id:** 2310.06117v2
- **Title:** Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models
- **Authors:** Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen, et al.
- **Published Date:** 2023-10-09
- **URL:** http://arxiv.org/abs/2310.06117v2
- **LangChain:**
- **Template:** [stepback-qa-prompting](https://python.langchain.com/docs/templates/stepback-qa-prompting)
@@ -258,9 +271,11 @@ and 11% respectively, TimeQA by 27%, and MuSiQue by 7%.
## Llama 2: Open Foundation and Fine-Tuned Chat Models
- **arXiv id:** [2307.09288v2](http://arxiv.org/abs/2307.09288v2) **Published Date:** 2023-07-18
- **arXiv id:** 2307.09288v2
- **Title:** Llama 2: Open Foundation and Fine-Tuned Chat Models
- **Authors:** Hugo Touvron, Louis Martin, Kevin Stone, et al.
- **Published Date:** 2023-07-18
- **URL:** http://arxiv.org/abs/2307.09288v2
- **LangChain:**
- **Cookbook:** [Semi_Structured_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_Structured_RAG.ipynb)
@@ -277,9 +292,11 @@ contribute to the responsible development of LLMs.
## Query Rewriting for Retrieval-Augmented Large Language Models
- **arXiv id:** [2305.14283v3](http://arxiv.org/abs/2305.14283v3) **Published Date:** 2023-05-23
- **arXiv id:** 2305.14283v3
- **Title:** Query Rewriting for Retrieval-Augmented Large Language Models
- **Authors:** Xinbei Ma, Yeyun Gong, Pengcheng He, et al.
- **Published Date:** 2023-05-23
- **URL:** http://arxiv.org/abs/2305.14283v3
- **LangChain:**
- **Template:** [rewrite-retrieve-read](https://python.langchain.com/docs/templates/rewrite-retrieve-read)
@@ -305,9 +322,11 @@ for retrieval-augmented LLM.
## Large Language Model Guided Tree-of-Thought
- **arXiv id:** [2305.08291v1](http://arxiv.org/abs/2305.08291v1) **Published Date:** 2023-05-15
- **arXiv id:** 2305.08291v1
- **Title:** Large Language Model Guided Tree-of-Thought
- **Authors:** Jieyi Long
- **Published Date:** 2023-05-15
- **URL:** http://arxiv.org/abs/2305.08291v1
- **LangChain:**
- **API Reference:** [langchain_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot)
@@ -333,9 +352,11 @@ implementation of the ToT-based Sudoku solver is available on GitHub:
## Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models
- **arXiv id:** [2305.04091v3](http://arxiv.org/abs/2305.04091v3) **Published Date:** 2023-05-06
- **arXiv id:** 2305.04091v3
- **Title:** Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models
- **Authors:** Lei Wang, Wanyu Xu, Yihuai Lan, et al.
- **Published Date:** 2023-05-06
- **URL:** http://arxiv.org/abs/2305.04091v3
- **LangChain:**
- **Cookbook:** [plan_and_execute_agent](https://github.com/langchain-ai/langchain/blob/master/cookbook/plan_and_execute_agent.ipynb)
@@ -362,35 +383,13 @@ Prompting, and has comparable performance with 8-shot CoT prompting on the math
reasoning problem. The code can be found at
https://github.com/AGI-Edgerunners/Plan-and-Solve-Prompting.
## Zero-Shot Listwise Document Reranking with a Large Language Model
- **arXiv id:** [2305.02156v1](http://arxiv.org/abs/2305.02156v1) **Published Date:** 2023-05-03
- **Title:** Zero-Shot Listwise Document Reranking with a Large Language Model
- **Authors:** Xueguang Ma, Xinyu Zhang, Ronak Pradeep, et al.
- **LangChain:**
- **API Reference:** [langchain...LLMListwiseRerank](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.document_compressors.listwise_rerank.LLMListwiseRerank.html#langchain.retrievers.document_compressors.listwise_rerank.LLMListwiseRerank)
**Abstract:** Supervised ranking methods based on bi-encoder or cross-encoder architectures
have shown success in multi-stage text ranking tasks, but they require large
amounts of relevance judgments as training data. In this work, we propose
Listwise Reranker with a Large Language Model (LRL), which achieves strong
reranking effectiveness without using any task-specific training data.
Different from the existing pointwise ranking methods, where documents are
scored independently and ranked according to the scores, LRL directly generates
a reordered list of document identifiers given the candidate documents.
Experiments on three TREC web search datasets demonstrate that LRL not only
outperforms zero-shot pointwise methods when reranking first-stage retrieval
results, but can also act as a final-stage reranker to improve the top-ranked
results of a pointwise method for improved efficiency. Additionally, we apply
our approach to subsets of MIRACL, a recent multilingual retrieval dataset,
with results showing its potential to generalize across different languages.
## Visual Instruction Tuning
- **arXiv id:** [2304.08485v2](http://arxiv.org/abs/2304.08485v2) **Published Date:** 2023-04-17
- **arXiv id:** 2304.08485v2
- **Title:** Visual Instruction Tuning
- **Authors:** Haotian Liu, Chunyuan Li, Qingyang Wu, et al.
- **Published Date:** 2023-04-17
- **URL:** http://arxiv.org/abs/2304.08485v2
- **LangChain:**
- **Cookbook:** [Semi_structured_and_multi_modal_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb), [Semi_structured_multi_modal_RAG_LLaMA2](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb)
@@ -413,9 +412,11 @@ publicly available.
## Generative Agents: Interactive Simulacra of Human Behavior
- **arXiv id:** [2304.03442v2](http://arxiv.org/abs/2304.03442v2) **Published Date:** 2023-04-07
- **arXiv id:** 2304.03442v2
- **Title:** Generative Agents: Interactive Simulacra of Human Behavior
- **Authors:** Joon Sung Park, Joseph C. O'Brien, Carrie J. Cai, et al.
- **Published Date:** 2023-04-07
- **URL:** http://arxiv.org/abs/2304.03442v2
- **LangChain:**
- **Cookbook:** [multiagent_bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb), [generative_agents_interactive_simulacra_of_human_behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb)
@@ -447,9 +448,11 @@ interaction patterns for enabling believable simulations of human behavior.
## CAMEL: Communicative Agents for "Mind" Exploration of Large Language Model Society
- **arXiv id:** [2303.17760v2](http://arxiv.org/abs/2303.17760v2) **Published Date:** 2023-03-31
- **arXiv id:** 2303.17760v2
- **Title:** CAMEL: Communicative Agents for "Mind" Exploration of Large Language Model Society
- **Authors:** Guohao Li, Hasan Abed Al Kader Hammoud, Hani Itani, et al.
- **Published Date:** 2023-03-31
- **URL:** http://arxiv.org/abs/2303.17760v2
- **LangChain:**
- **Cookbook:** [camel_role_playing](https://github.com/langchain-ai/langchain/blob/master/cookbook/camel_role_playing.ipynb)
@@ -475,9 +478,11 @@ agents and beyond: https://github.com/camel-ai/camel.
## HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face
- **arXiv id:** [2303.17580v4](http://arxiv.org/abs/2303.17580v4) **Published Date:** 2023-03-30
- **arXiv id:** 2303.17580v4
- **Title:** HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face
- **Authors:** Yongliang Shen, Kaitao Song, Xu Tan, et al.
- **Published Date:** 2023-03-30
- **URL:** http://arxiv.org/abs/2303.17580v4
- **LangChain:**
- **API Reference:** [langchain_experimental.autonomous_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents)
@@ -503,14 +508,40 @@ modalities and domains and achieve impressive results in language, vision,
speech, and other challenging tasks, which paves a new way towards the
realization of artificial general intelligence.
## A Watermark for Large Language Models
## GPT-4 Technical Report
- **arXiv id:** [2301.10226v4](http://arxiv.org/abs/2301.10226v4) **Published Date:** 2023-01-24
- **Title:** A Watermark for Large Language Models
- **Authors:** John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al.
- **arXiv id:** 2303.08774v6
- **Title:** GPT-4 Technical Report
- **Authors:** OpenAI, Josh Achiam, Steven Adler, et al.
- **Published Date:** 2023-03-15
- **URL:** http://arxiv.org/abs/2303.08774v6
- **LangChain:**
- **API Reference:** [langchain_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
- **Documentation:** [docs/integrations/vectorstores/mongodb_atlas](https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas)
**Abstract:** We report the development of GPT-4, a large-scale, multimodal model which can
accept image and text inputs and produce text outputs. While less capable than
humans in many real-world scenarios, GPT-4 exhibits human-level performance on
various professional and academic benchmarks, including passing a simulated bar
exam with a score around the top 10% of test takers. GPT-4 is a
Transformer-based model pre-trained to predict the next token in a document.
The post-training alignment process results in improved performance on measures
of factuality and adherence to desired behavior. A core component of this
project was developing infrastructure and optimization methods that behave
predictably across a wide range of scales. This allowed us to accurately
predict some aspects of GPT-4's performance based on models trained with no
more than 1/1,000th the compute of GPT-4.
## A Watermark for Large Language Models
- **arXiv id:** 2301.10226v4
- **Title:** A Watermark for Large Language Models
- **Authors:** John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al.
- **Published Date:** 2023-01-24
- **URL:** http://arxiv.org/abs/2301.10226v4
- **LangChain:**
- **API Reference:** [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
**Abstract:** Potential harms of large language models can be mitigated by watermarking
model output, i.e., embedding signals into generated text that are invisible to
@@ -528,9 +559,11 @@ family, and discuss robustness and security.
## Precise Zero-Shot Dense Retrieval without Relevance Labels
- **arXiv id:** [2212.10496v1](http://arxiv.org/abs/2212.10496v1) **Published Date:** 2022-12-20
- **arXiv id:** 2212.10496v1
- **Title:** Precise Zero-Shot Dense Retrieval without Relevance Labels
- **Authors:** Luyu Gao, Xueguang Ma, Jimmy Lin, et al.
- **Published Date:** 2022-12-20
- **URL:** http://arxiv.org/abs/2212.10496v1
- **LangChain:**
- **API Reference:** [langchain...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder)
@@ -557,9 +590,11 @@ search, QA, fact verification) and languages~(e.g. sw, ko, ja).
## Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments
- **arXiv id:** [2212.07425v3](http://arxiv.org/abs/2212.07425v3) **Published Date:** 2022-12-12
- **arXiv id:** 2212.07425v3
- **Title:** Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments
- **Authors:** Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al.
- **Published Date:** 2022-12-12
- **URL:** http://arxiv.org/abs/2212.07425v3
- **LangChain:**
- **API Reference:** [langchain_experimental.fallacy_removal](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.fallacy_removal)
@@ -588,9 +623,11 @@ further work on logical fallacy identification.
## Complementary Explanations for Effective In-Context Learning
- **arXiv id:** [2211.13892v2](http://arxiv.org/abs/2211.13892v2) **Published Date:** 2022-11-25
- **arXiv id:** 2211.13892v2
- **Title:** Complementary Explanations for Effective In-Context Learning
- **Authors:** Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al.
- **Published Date:** 2022-11-25
- **URL:** http://arxiv.org/abs/2211.13892v2
- **LangChain:**
- **API Reference:** [langchain_core...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)
@@ -614,12 +651,14 @@ performance across three real-world tasks on multiple LLMs.
## PAL: Program-aided Language Models
- **arXiv id:** [2211.10435v2](http://arxiv.org/abs/2211.10435v2) **Published Date:** 2022-11-18
- **arXiv id:** 2211.10435v2
- **Title:** PAL: Program-aided Language Models
- **Authors:** Luyu Gao, Aman Madaan, Shuyan Zhou, et al.
- **Published Date:** 2022-11-18
- **URL:** http://arxiv.org/abs/2211.10435v2
- **LangChain:**
- **API Reference:** [langchain_experimental.pal_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain), [langchain_experimental...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain)
- **API Reference:** [langchain_experimental...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), [langchain_experimental.pal_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain)
- **Cookbook:** [program_aided_language_model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb)
**Abstract:** Large language models (LLMs) have recently demonstrated an impressive ability
@@ -647,13 +686,15 @@ publicly available at http://reasonwithpal.com/ .
## ReAct: Synergizing Reasoning and Acting in Language Models
- **arXiv id:** [2210.03629v3](http://arxiv.org/abs/2210.03629v3) **Published Date:** 2022-10-06
- **arXiv id:** 2210.03629v3
- **Title:** ReAct: Synergizing Reasoning and Acting in Language Models
- **Authors:** Shunyu Yao, Jeffrey Zhao, Dian Yu, et al.
- **Published Date:** 2022-10-06
- **URL:** http://arxiv.org/abs/2210.03629v3
- **LangChain:**
- **Documentation:** [docs/integrations/providers/cohere](https://python.langchain.com/docs/integrations/providers/cohere), [docs/integrations/tools/ionic_shopping](https://python.langchain.com/docs/integrations/tools/ionic_shopping)
- **API Reference:** [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain), [langchain...create_react_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent)
- **Documentation:** [docs/integrations/providers/cohere](https://python.langchain.com/docs/integrations/providers/cohere), [docs/integrations/chat/huggingface](https://python.langchain.com/docs/integrations/chat/huggingface), [docs/integrations/tools/ionic_shopping](https://python.langchain.com/docs/integrations/tools/ionic_shopping)
- **API Reference:** [langchain...create_react_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent), [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain)
**Abstract:** While large language models (LLMs) have demonstrated impressive capabilities
across tasks in language understanding and interactive decision making, their
@@ -680,9 +721,11 @@ Project site with code: https://react-lm.github.io
## Deep Lake: a Lakehouse for Deep Learning
- **arXiv id:** [2209.10785v2](http://arxiv.org/abs/2209.10785v2) **Published Date:** 2022-09-22
- **arXiv id:** 2209.10785v2
- **Title:** Deep Lake: a Lakehouse for Deep Learning
- **Authors:** Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al.
- **Published Date:** 2022-09-22
- **URL:** http://arxiv.org/abs/2209.10785v2
- **LangChain:**
- **Documentation:** [docs/integrations/providers/activeloop_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake)
@@ -704,43 +747,13 @@ visualization engine, or (c) deep learning frameworks without sacrificing GPU
utilization. Datasets stored in Deep Lake can be accessed from PyTorch,
TensorFlow, JAX, and integrate with numerous MLOps tools.
## Matryoshka Representation Learning
- **arXiv id:** [2205.13147v4](http://arxiv.org/abs/2205.13147v4) **Published Date:** 2022-05-26
- **Title:** Matryoshka Representation Learning
- **Authors:** Aditya Kusupati, Gantavya Bhatt, Aniket Rege, et al.
- **LangChain:**
- **Documentation:** [docs/integrations/providers/snowflake](https://python.langchain.com/docs/integrations/providers/snowflake)
**Abstract:** Learned representations are a central component in modern ML systems, serving
a multitude of downstream tasks. When training such representations, it is
often the case that computational and statistical constraints for each
downstream task are unknown. In this context rigid, fixed capacity
representations can be either over or under-accommodating to the task at hand.
This leads us to ask: can we design a flexible representation that can adapt to
multiple downstream tasks with varying computational resources? Our main
contribution is Matryoshka Representation Learning (MRL) which encodes
information at different granularities and allows a single embedding to adapt
to the computational constraints of downstream tasks. MRL minimally modifies
existing representation learning pipelines and imposes no additional cost
during inference and deployment. MRL learns coarse-to-fine representations that
are at least as accurate and rich as independently trained low-dimensional
representations. The flexibility within the learned Matryoshka Representations
offer: (a) up to 14x smaller embedding size for ImageNet-1K classification at
the same level of accuracy; (b) up to 14x real-world speed-ups for large-scale
retrieval on ImageNet-1K and 4K; and (c) up to 2% accuracy improvements for
long-tail few-shot classification, all while being as robust as the original
representations. Finally, we show that MRL extends seamlessly to web-scale
datasets (ImageNet, JFT) across various modalities -- vision (ViT, ResNet),
vision + language (ALIGN) and language (BERT). MRL code and pretrained models
are open-sourced at https://github.com/RAIVNLab/MRL.
## Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages
- **arXiv id:** [2205.12654v1](http://arxiv.org/abs/2205.12654v1) **Published Date:** 2022-05-25
- **arXiv id:** 2205.12654v1
- **Title:** Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages
- **Authors:** Kevin Heffernan, Onur Çelebi, Holger Schwenk
- **Published Date:** 2022-05-25
- **URL:** http://arxiv.org/abs/2205.12654v1
- **LangChain:**
- **API Reference:** [langchain_community...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)
@@ -765,12 +778,14 @@ encoders, mine bitexts, and validate the bitexts by training NMT systems.
## Evaluating the Text-to-SQL Capabilities of Large Language Models
- **arXiv id:** [2204.00498v1](http://arxiv.org/abs/2204.00498v1) **Published Date:** 2022-03-15
- **arXiv id:** 2204.00498v1
- **Title:** Evaluating the Text-to-SQL Capabilities of Large Language Models
- **Authors:** Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau
- **Published Date:** 2022-03-15
- **URL:** http://arxiv.org/abs/2204.00498v1
- **LangChain:**
- **API Reference:** [langchain_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase), [langchain_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL)
- **API Reference:** [langchain_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL), [langchain_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase)
**Abstract:** We perform an empirical evaluation of Text-to-SQL capabilities of the Codex
language model. We find that, without any finetuning, Codex is a strong
@@ -782,12 +797,14 @@ few-shot examples.
## Locally Typical Sampling
- **arXiv id:** [2202.00666v5](http://arxiv.org/abs/2202.00666v5) **Published Date:** 2022-02-01
- **arXiv id:** 2202.00666v5
- **Title:** Locally Typical Sampling
- **Authors:** Clara Meister, Tiago Pimentel, Gian Wiher, et al.
- **Published Date:** 2022-02-01
- **URL:** http://arxiv.org/abs/2202.00666v5
- **LangChain:**
- **API Reference:** [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
- **API Reference:** [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
**Abstract:** Today's probabilistic language generators fall short when it comes to
producing coherent and fluent text despite the fact that the underlying models
@@ -812,9 +829,11 @@ reducing degenerate repetitions.
## Learning Transferable Visual Models From Natural Language Supervision
- **arXiv id:** [2103.00020v1](http://arxiv.org/abs/2103.00020v1) **Published Date:** 2021-02-26
- **arXiv id:** 2103.00020v1
- **Title:** Learning Transferable Visual Models From Natural Language Supervision
- **Authors:** Alec Radford, Jong Wook Kim, Chris Hallacy, et al.
- **Published Date:** 2021-02-26
- **URL:** http://arxiv.org/abs/2103.00020v1
- **LangChain:**
- **API Reference:** [langchain_experimental.open_clip](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.open_clip)
@@ -842,12 +861,14 @@ https://github.com/OpenAI/CLIP.
## CTRL: A Conditional Transformer Language Model for Controllable Generation
- **arXiv id:** [1909.05858v2](http://arxiv.org/abs/1909.05858v2) **Published Date:** 2019-09-11
- **arXiv id:** 1909.05858v2
- **Title:** CTRL: A Conditional Transformer Language Model for Controllable Generation
- **Authors:** Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al.
- **Published Date:** 2019-09-11
- **URL:** http://arxiv.org/abs/1909.05858v2
- **LangChain:**
- **API Reference:** [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
- **API Reference:** [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
**Abstract:** Large-scale language models show promising text generation capabilities, but
users cannot easily control particular aspects of the generated text. We
@@ -860,4 +881,32 @@ codes also allow CTRL to predict which parts of the training data are most
likely given a sequence. This provides a potential method for analyzing large
amounts of data via model-based source attribution. We have released multiple
full-sized, pretrained versions of CTRL at https://github.com/salesforce/ctrl.
## Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks
- **arXiv id:** 1908.10084v1
- **Title:** Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks
- **Authors:** Nils Reimers, Iryna Gurevych
- **Published Date:** 2019-08-27
- **URL:** http://arxiv.org/abs/1908.10084v1
- **LangChain:**
- **Documentation:** [docs/integrations/text_embedding/sentence_transformers](https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers)
**Abstract:** BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) has set a new
state-of-the-art performance on sentence-pair regression tasks like semantic
textual similarity (STS). However, it requires that both sentences are fed into
the network, which causes a massive computational overhead: Finding the most
similar pair in a collection of 10,000 sentences requires about 50 million
inference computations (~65 hours) with BERT. The construction of BERT makes it
unsuitable for semantic similarity search as well as for unsupervised tasks
like clustering.
In this publication, we present Sentence-BERT (SBERT), a modification of the
pretrained BERT network that use siamese and triplet network structures to
derive semantically meaningful sentence embeddings that can be compared using
cosine-similarity. This reduces the effort for finding the most similar pair
from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while
maintaining the accuracy from BERT.
We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning
tasks, where it outperforms other state-of-the-art sentence embeddings methods.

View File

@@ -51,11 +51,10 @@ A developer platform that lets you debug, test, evaluate, and monitor LLM applic
<ThemedImage
alt="Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers."
sources={{
light: useBaseUrl('/svg/langchain_stack_062024.svg'),
dark: useBaseUrl('/svg/langchain_stack_062024_dark.svg'),
light: useBaseUrl('/svg/langchain_stack.svg'),
dark: useBaseUrl('/svg/langchain_stack_dark.svg'),
}}
title="LangChain Framework Overview"
style={{ width: "100%" }}
/>
## LangChain Expression Language (LCEL)
@@ -86,16 +85,11 @@ Input and output schemas give every LCEL chain Pydantic and JSONSchema schemas i
As your chains get more and more complex, it becomes increasingly important to understand what exactly is happening at every step.
With LCEL, **all** steps are automatically logged to [LangSmith](https://docs.smith.langchain.com/) for maximum observability and debuggability.
LCEL aims to provide consistency around behavior and customization over legacy subclassed chains such as `LLMChain` and
`ConversationalRetrievalChain`. Many of these legacy chains hide important details like prompts, and as a wider variety
of viable models emerge, customization has become more and more important.
If you are currently using one of these legacy chains, please see [this guide for guidance on how to migrate](/docs/versions/migrating_chains).
For guides on how to do specific tasks with LCEL, check out [the relevant how-to guides](/docs/how_to/#langchain-expression-language-lcel).
[**Seamless LangServe deployment**](/docs/langserve)
Any chain created with LCEL can be easily deployed using [LangServe](/docs/langserve).
### Runnable interface
<span data-heading-keywords="invoke,runnable"></span>
<span data-heading-keywords="invoke"></span>
To make it as easy as possible to create custom chains, we've implemented a ["Runnable"](https://api.python.langchain.com/en/stable/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable) protocol. Many LangChain components implement the `Runnable` protocol, including chat models, LLMs, output parsers, retrievers, prompt templates, and more. There are also several useful primitives for working with runnables, which you can read about below.
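As a minimal sketch of what that shared interface looks like in practice (using `RunnableLambda`, one of the runnable primitives, so no model or API key is needed):

```python
from langchain_core.runnables import RunnableLambda

# Wrap plain functions as Runnables and compose them with the `|` operator.
add_one = RunnableLambda(lambda x: x + 1)
double = RunnableLambda(lambda x: x * 2)
chain = add_one | double          # a RunnableSequence, itself a Runnable

chain.invoke(3)                   # -> 8
chain.batch([1, 2, 3])            # -> [4, 6, 8]
for chunk in chain.stream(3):     # streams whatever the last step yields
    print(chunk)                  # -> 8
```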
@@ -165,7 +159,7 @@ Some important things to note:
ChatModels also accept other parameters that are specific to that integration. To find all the parameters supported by a ChatModel head to the API reference for that model.
:::important
Some chat models have been fine-tuned for **tool calling** and provide a dedicated API for it.
**Tool Calling** Some chat models have been fine-tuned for tool calling and provide a dedicated API for tool calling.
Generally, such models are better at tool calling than non-fine-tuned models, and are recommended for use cases that require tool calling.
Please see the [tool calling section](/docs/concepts/#functiontool-calling) for more information.
:::
@@ -236,7 +230,7 @@ This is where information like log-probs and token usage may be stored.
These represent a decision from a language model to call a tool. They are included as part of an `AIMessage` output.
They can be accessed from there with the `.tool_calls` property.
This property returns a list of `ToolCall`s. A `ToolCall` is a dictionary with the following arguments:
This property returns a list of dictionaries. Each dictionary has the following keys:
- `name`: The name of the tool that should be called.
- `args`: The arguments to that tool.
@@ -246,19 +240,14 @@ This property returns a list of `ToolCall`s. A `ToolCall` is a dictionary with t
This represents a system message, which tells the model how to behave. Not every model provider supports this.
#### ToolMessage
This represents the result of a tool call. In addition to `role` and `content`, this message has:
- a `tool_call_id` field which conveys the id of the call to the tool that was called to produce this result.
- an `artifact` field which can be used to pass along arbitrary artifacts of the tool execution which are useful to track but which should not be sent to the model.
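As a small, hand-written sketch (the id and payload below are made-up values, and the `artifact` parameter assumes a `langchain-core` version that supports it):

```python
from langchain_core.messages import ToolMessage

tool_message = ToolMessage(
    content="It is 64 degrees and sunny in San Francisco.",           # sent back to the model
    tool_call_id="call_abc123",                                        # id of the tool call that produced this result (made up)
    artifact={"raw_response": {"temp_f": 64, "conditions": "sunny"}},  # kept around, but not sent to the model
)
```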
#### (Legacy) FunctionMessage
This is a legacy message type, corresponding to OpenAI's legacy function-calling API. `ToolMessage` should be used instead to correspond to the updated tool-calling API.
#### FunctionMessage
This represents the result of a function call. In addition to `role` and `content`, this message has a `name` parameter which conveys the name of the function that was called to produce this result.
#### ToolMessage
This represents the result of a tool call. This is distinct from a FunctionMessage in order to match OpenAI's `function` and `tool` message types. In addition to `role` and `content`, this message has a `tool_call_id` parameter which conveys the id of the call to the tool that was called to produce this result.
### Prompt templates
<span data-heading-keywords="prompt,prompttemplate,chatprompttemplate"></span>
@@ -498,130 +487,36 @@ Retrievers accept a string query as input and return a list of Document's as out
For specifics on how to use retrievers, see the [relevant how-to guides here](/docs/how_to/#retrievers).
### Key-value stores
For some techniques, such as [indexing and retrieval with multiple vectors per document](/docs/how_to/multi_vector/) or
[caching embeddings](/docs/how_to/caching_embeddings/), having a form of key-value (KV) storage is helpful.
LangChain includes a [`BaseStore`](https://api.python.langchain.com/en/latest/stores/langchain_core.stores.BaseStore.html) interface,
which allows for storage of arbitrary data. However, LangChain components that require KV-storage accept a
more specific `BaseStore[str, bytes]` instance that stores binary data (referred to as a `ByteStore`), and internally take care of
encoding and decoding data for their specific needs.
This means that as a user, you only need to think about one type of store rather than different ones for different types of data.
#### Interface
All [`BaseStores`](https://api.python.langchain.com/en/latest/stores/langchain_core.stores.BaseStore.html) support the following interface. Note that the interface allows
for modifying **multiple** key-value pairs at once:
- `mget(keys: Sequence[str]) -> List[Optional[bytes]]`: get the contents of multiple keys, returning `None` if a key does not exist
- `mset(key_value_pairs: Sequence[Tuple[str, bytes]]) -> None`: set the contents of multiple keys
- `mdelete(keys: Sequence[str]) -> None`: delete multiple keys
- `yield_keys(prefix: Optional[str] = None) -> Iterator[str]`: yield all keys in the store, optionally filtering by a prefix
For key-value store implementations, see [this section](/docs/integrations/stores/).
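A minimal sketch of this interface, using the in-memory `ByteStore` implementation from `langchain_core`:

```python
from langchain_core.stores import InMemoryByteStore

store = InMemoryByteStore()
store.mset([("doc:1", b"hello"), ("doc:2", b"world")])  # set several key-value pairs at once
store.mget(["doc:1", "doc:3"])                          # -> [b'hello', None]; missing keys return None
list(store.yield_keys(prefix="doc:"))                   # -> ['doc:1', 'doc:2']
store.mdelete(["doc:1"])                                # delete keys you no longer need
```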
### Tools
<span data-heading-keywords="tool,tools"></span>
Tools are utilities designed to be called by a model: their inputs are designed to be generated by models, and their outputs are designed to be passed back to models.
Tools are needed whenever you want a model to control parts of your code or call out to external APIs.
Tools are interfaces that an agent, a chain, or a chat model / LLM can use to interact with the world.
A tool consists of:
A tool consists of the following components:
1. The name of the tool.
2. A description of what the tool does.
3. A JSON schema defining the inputs to the tool.
4. A function (and, optionally, an async variant of the function).
1. The name of the tool
2. A description of what the tool does
3. JSON schema of what the inputs to the tool are
4. The function to call
5. Whether the result of a tool should be returned directly to the user (only relevant for agents)
When a tool is bound to a model, the name, description and JSON schema are provided as context to the model.
Given a list of tools and a set of instructions, a model can request to call one or more tools with specific inputs.
Typical usage may look like the following:
The name, description and JSON schema are provided as context
to the LLM, allowing the LLM to determine how to use the tool
appropriately.
```python
tools = [...] # Define a list of tools
llm_with_tools = llm.bind_tools(tools)
ai_msg = llm_with_tools.invoke("do xyz...")
# -> AIMessage(tool_calls=[ToolCall(...), ...], ...)
```
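Defining the tool itself is usually done with the `@tool` decorator from `langchain_core.tools`, which derives the name, description, and JSON schema from the function signature and docstring. A minimal sketch:

```python
from langchain_core.tools import tool

@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b

multiply.name                      # "multiply"
multiply.description               # derived from the docstring
multiply.args                      # JSON-schema-style description of the arguments
multiply.invoke({"a": 2, "b": 3})  # -> 6
```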
Given a list of available tools and a prompt, an LLM can request
that one or more tools be invoked with appropriate arguments.
The `AIMessage` returned from the model MAY have `tool_calls` associated with it.
Read [this guide](/docs/concepts/#aimessage) for more information on what the response type may look like.
Generally, when designing tools to be used by a chat model or LLM, it is important to keep in mind the following:
Once the chosen tools are invoked, the results can be passed back to the model so that it can complete whatever task
it's performing.
There are generally two different ways to invoke the tool and pass back the response:
- Chat models that have been fine-tuned for tool calling will be better at tool calling than non-fine-tuned models.
- Non fine-tuned models may not be able to use tools at all, especially if the tools are complex or require multiple tool calls.
- Models will perform better if the tools have well-chosen names, descriptions, and JSON schemas.
- Simpler tools are generally easier for models to use than more complex tools.
#### Invoke with just the arguments
When you invoke a tool with just the arguments, you will get back the raw tool output (usually a string).
This generally looks like:
```python
# You will want to previously check that the LLM returned tool calls
tool_call = ai_msg.tool_calls[0]
# ToolCall(args={...}, id=..., ...)
tool_output = tool.invoke(tool_call["args"])
tool_message = ToolMessage(
    content=tool_output,
    tool_call_id=tool_call["id"],
    name=tool_call["name"]
)
```
Note that the `content` field will generally be passed back to the model.
If you do not want the raw tool response to be passed to the model but still want to keep it around,
you can transform the tool output into the message `content` and attach the raw output as an artifact (read more about [`ToolMessage.artifact` here](/docs/concepts/#toolmessage)):
```python
... # Same code as above
response_for_llm = transform(tool_output)  # `transform` stands in for any post-processing step
tool_message = ToolMessage(
    content=response_for_llm,
    tool_call_id=tool_call["id"],
    name=tool_call["name"],
    artifact=tool_output
)
```
#### Invoke with `ToolCall`
The other way to invoke a tool is to call it with the full `ToolCall` that was generated by the model.
When you do this, the tool will return a ToolMessage.
The benefits of this are that you don't have to write the logic yourself to transform the tool output into a ToolMessage.
This generally looks like:
```python
tool_call = ai_msg.tool_calls[0]
# -> ToolCall(args={...}, id=..., ...)
tool_message = tool.invoke(tool_call)
# -> ToolMessage(
content="tool result foobar...",
tool_call_id=...,
name="tool_name"
)
```
If you are invoking the tool this way and want to include an [artifact](/docs/concepts/#toolmessage) for the ToolMessage, you will need to have the tool return two things.
Read more about [defining tools that return artifacts here](/docs/how_to/tool_artifacts/).
#### Best practices
When designing tools to be used by a model, it is important to keep in mind that:
- Chat models that have explicit [tool-calling APIs](/docs/concepts/#functiontool-calling) will be better at tool calling than non-fine-tuned models.
- Models will perform better if the tools have well-chosen names, descriptions, and JSON schemas. This is another form of prompt engineering.
- Simple, narrowly scoped tools are easier for models to use than complex tools.
#### Related
For specifics on how to use tools, see the [tools how-to guides](/docs/how_to/#tools).
To use a pre-built tool, see the [tool integration docs](/docs/integrations/tools/).
For specifics on how to use tools, see the [relevant how-to guides here](/docs/how_to/#tools).
### Toolkits
<span data-heading-keywords="toolkit,toolkits"></span>
Toolkits are collections of tools that are designed to be used together for specific tasks. They have convenient loading methods.
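The usage pattern is generally the same across toolkits: instantiate the toolkit, then call `get_tools()` to get the list of tools. A sketch using the file-management toolkit from `langchain_community` (assumed to be installed) as an example:

```python
from langchain_community.agent_toolkits import FileManagementToolkit

toolkit = FileManagementToolkit(root_dir="/tmp/workdir")  # scope the file tools to one directory
tools = toolkit.get_tools()                               # read, write, list-directory tools, etc.
print([t.name for t in tools])
```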
@@ -655,28 +550,6 @@ If you are still using AgentExecutor, do not fear: we still have a guide on [how
It is recommended, however, that you start to transition to LangGraph.
In order to assist in this we have put together a [transition guide on how to do so](/docs/how_to/migrate_agent).
#### ReAct agents
<span data-heading-keywords="react,react agent"></span>
One popular architecture for building agents is [**ReAct**](https://arxiv.org/abs/2210.03629).
ReAct combines reasoning and acting in an iterative process - in fact the name "ReAct" stands for "Reason" and "Act".
The general flow looks like this:
- The model will "think" about what step to take in response to an input and any previous observations.
- The model will then choose an action from available tools (or choose to respond to the user).
- The model will generate arguments to that tool.
- The agent runtime (executor) will parse out the chosen tool and call it with the generated arguments.
- The executor will return the results of the tool call back to the model as an observation.
- This process repeats until the agent chooses to respond.
There are general prompting based implementations that do not require any model-specific features, but the most
reliable implementations use features like [tool calling](/docs/how_to/tool_calling/) to reliably format outputs
and reduce variance.
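As a minimal sketch, assuming the prebuilt `create_react_agent` helper from `langgraph.prebuilt` and a tool-calling chat model plus a list of `tools` defined elsewhere:

```python
from langgraph.prebuilt import create_react_agent

# `llm` and `tools` are assumed to be defined elsewhere (a tool-calling chat model and a list of tools).
agent = create_react_agent(llm, tools)
result = agent.invoke({"messages": [("user", "What is 8 * 7, and is that more than 50?")]})
result["messages"][-1].content  # final response after any intermediate tool calls
```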
Please see the [LangGraph documentation](https://langchain-ai.github.io/langgraph/) for more information,
or [this how-to guide](/docs/how_to/migrate_agent/) for specific information on migrating to LangGraph.
### Callbacks
LangChain provides a callbacks system that allows you to hook into the various stages of your LLM application. This is useful for logging, monitoring, streaming, and other tasks.
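As a minimal sketch, a custom handler subclasses `BaseCallbackHandler` and overrides only the hooks it needs (the handler below is illustrative; the full set of events is listed in the API reference):

```python
from langchain_core.callbacks import BaseCallbackHandler

class LoggingHandler(BaseCallbackHandler):
    """Print a line when an LLM call starts and ends (illustration only)."""

    def on_llm_start(self, serialized, prompts, **kwargs):
        print(f"LLM started with {len(prompts)} prompt(s)")

    def on_llm_end(self, response, **kwargs):
        print("LLM finished")

# Handlers can be passed at invocation time via the config:
# llm.invoke("Hello!", config={"callbacks": [LoggingHandler()]})
```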
@@ -866,61 +739,6 @@ units (like words or subwords) that carry meaning, rather than individual charac
to learn and understand the structure of the language, including grammar and context.
Furthermore, using tokens can also improve efficiency, since the model processes fewer units of text compared to character-level processing.
### Function/tool calling
:::info
We use the term tool calling interchangeably with function calling. Although
function calling is sometimes meant to refer to invocations of a single function,
we treat all models as though they can return multiple tool or function calls in
each message.
:::
Tool calling allows a [chat model](/docs/concepts/#chat-models) to respond to a given prompt by generating output that
matches a user-defined schema.
While the name implies that the model is performing
some action, this is actually not the case! The model only generates the arguments to a tool, and actually running the tool (or not) is up to the user.
One common example where you **wouldn't** want to call a function with the generated arguments
is if you want to [extract structured output matching some schema](/docs/concepts/#structured-output)
from unstructured text. You would give the model an "extraction" tool that takes
parameters matching the desired schema, then treat the generated output as your final
result.
![Diagram of a tool call by a chat model](/img/tool_call.png)
Tool calling is not universal, but is supported by many popular LLM providers, including [Anthropic](/docs/integrations/chat/anthropic/),
[Cohere](/docs/integrations/chat/cohere/), [Google](/docs/integrations/chat/google_vertex_ai_palm/),
[Mistral](/docs/integrations/chat/mistralai/), [OpenAI](/docs/integrations/chat/openai/), and even for locally-running models via [Ollama](/docs/integrations/chat/ollama/).
LangChain provides a standardized interface for tool calling that is consistent across different models.
The standard interface consists of:
* `ChatModel.bind_tools()`: a method for specifying which tools are available for a model to call. This method accepts [LangChain tools](/docs/concepts/#tools) as well as [Pydantic](https://pydantic.dev/) objects.
* `AIMessage.tool_calls`: an attribute on the `AIMessage` returned from the model for accessing the tool calls requested by the model.
#### Tool usage
After the model generates tool calls, you can invoke the corresponding tools with the generated arguments, then pass the results back to the model.
LangChain provides the [`Tool`](/docs/concepts/#tools) abstraction to help you handle this.
The general flow is this:
1. Generate tool calls with a chat model in response to a query.
2. Invoke the appropriate tools using the generated tool call as arguments.
3. Format the result of the tool invocations as [`ToolMessages`](/docs/concepts/#toolmessage).
4. Pass the entire list of messages back to the model so that it can generate a final answer (or call more tools).
![Diagram of a complete tool calling flow](/img/tool_calling_flow.png)
This is how tool calling [agents](/docs/concepts/#agents) perform tasks and answer queries.
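A compact sketch of that loop, assuming `llm` is a chat model with tool-calling support (the `add` tool is a toy example):

```python
from langchain_core.messages import HumanMessage
from langchain_core.tools import tool

@tool
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b

llm_with_tools = llm.bind_tools([add])          # `llm` is assumed to be defined elsewhere
messages = [HumanMessage("What is 11 + 49?")]

ai_msg = llm_with_tools.invoke(messages)        # 1. the model emits tool calls
messages.append(ai_msg)
for tool_call in ai_msg.tool_calls:
    messages.append(add.invoke(tool_call))      # 2-3. run the tool; invoking with a ToolCall returns a ToolMessage
final_answer = llm_with_tools.invoke(messages)  # 4. the model uses the tool results to answer
```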
Check out some more focused guides below:
- [How to use chat models to call tools](/docs/how_to/tool_calling/)
- [How to pass tool outputs to chat models](/docs/how_to/tool_results_pass_to_model/)
- [Building an agent with LangGraph](https://langchain-ai.github.io/langgraph/tutorials/introduction/)
### Structured output
LLMs are capable of generating arbitrary text. This enables the model to respond appropriately to a wide
@@ -936,54 +754,14 @@ a few ways to get structured output from models in LangChain.
#### `.with_structured_output()`
For convenience, some LangChain chat models support a [`.with_structured_output()`](/docs/how_to/structured_output/#the-with_structured_output-method)
method. This method only requires a schema as input, and returns a dict or Pydantic object.
For convenience, some LangChain chat models support a `.with_structured_output()` method.
This method only requires a schema as input, and returns a dict or Pydantic object.
Generally, this method is only present on models that support one of the more advanced methods described below,
and will use one of them under the hood. It takes care of importing a suitable output parser and
formatting the schema in the right format for the model.
Here's an example:
```python
from typing import Optional
from langchain_core.pydantic_v1 import BaseModel, Field
class Joke(BaseModel):
    """Joke to tell user."""

    setup: str = Field(description="The setup of the joke")
    punchline: str = Field(description="The punchline to the joke")
    rating: Optional[int] = Field(description="How funny the joke is, from 1 to 10")
structured_llm = llm.with_structured_output(Joke)
structured_llm.invoke("Tell me a joke about cats")
```
```
Joke(setup='Why was the cat sitting on the computer?', punchline='To keep an eye on the mouse!', rating=None)
```
We recommend this method as a starting point when working with structured output:
- It uses other model-specific features under the hood, without the need to import an output parser.
- For the models that use tool calling, no special prompting is needed.
- If multiple underlying techniques are supported, you can supply a `method` parameter to
[toggle which one is used](/docs/how_to/structured_output/#advanced-specifying-the-method-for-structuring-outputs).
You may want or need to use other techniques if:
- The chat model you are using does not support tool calling.
- You are working with very complex schemas and the model is having trouble generating outputs that conform.
For more information, check out this [how-to guide](/docs/how_to/structured_output/#the-with_structured_output-method).
You can also check out [this table](/docs/integrations/chat/#advanced-features) for a list of models that support
`with_structured_output()`.
#### Raw prompting
The most intuitive way to get a model to structure output is to ask nicely.
@@ -1006,8 +784,9 @@ for smooth parsing can be surprisingly difficult and model-specific.
Some may be better at interpreting [JSON schema](https://json-schema.org/), others may be best with TypeScript definitions,
and still others may prefer XML.
While features offered by model providers may increase reliability, prompting techniques remain important for tuning your
results no matter which method you choose.
While we'll next go over some ways that you can take advantage of features offered by
model providers to increase reliability, prompting techniques remain important for tuning your
results no matter what method you choose.
#### JSON mode
<span data-heading-keywords="json mode"></span>
@@ -1017,11 +796,10 @@ Some models, such as [Mistral](/docs/integrations/chat/mistralai/), [OpenAI](/do
support a feature called **JSON mode**, usually enabled via config.
When enabled, JSON mode will constrain the model's output to always be some sort of valid JSON.
Often they require some custom prompting, but it's usually much less burdensome than completely raw prompting and
more along the lines of, `"you must always return JSON"`. The [output also generally easier to parse](/docs/how_to/output_parser_json/).
Often they require some custom prompting, but it's usually much less burdensome and along the lines of,
`"you must always return JSON"`, and the [output is easier to parse](/docs/how_to/output_parser_json/).
It's also generally simpler to use directly and more commonly available than tool calling, and can give
more flexibility around prompting and shaping results.
It's also generally simpler and more commonly available than tool calling.
Here's an example:
@@ -1053,48 +831,48 @@ chain.invoke({ "question": "What is the powerhouse of the cell?" })
For a full list of model providers that support JSON mode, see [this table](/docs/integrations/chat/#advanced-features).
#### Tool calling {#structured-output-tool-calling}
#### Function/tool calling
For models that support it, [tool calling](/docs/concepts/#functiontool-calling) can be very convenient for structured output. It removes the
guesswork around how best to prompt schemas in favor of a built-in model feature.
:::info
We use the term tool calling interchangeably with function calling. Although
function calling is sometimes meant to refer to invocations of a single function,
we treat all models as though they can return multiple tool or function calls in
each message
:::
It works by first binding the desired schema either directly or via a [LangChain tool](/docs/concepts/#tools) to a
[chat model](/docs/concepts/#chat-models) using the `.bind_tools()` method. The model will then generate an `AIMessage` whose
`tool_calls` field contains `args` that match the desired shape.
Tool calling allows a model to respond to a given prompt by generating output that
matches a user-defined schema. While the name implies that the model is performing
some action, this is actually not the case! The model is coming up with the
arguments to a tool, and actually running the tool (or not) is up to the user -
for example, if you want to [extract output matching some schema](/docs/tutorials/extraction)
from unstructured text, you could give the model an "extraction" tool that takes
parameters matching the desired schema, then treat the generated output as your final
result.
There are several acceptable formats you can use to bind tools to a model in LangChain. Here's one example:
For models that support it, tool calling can be very convenient. It removes the
guesswork around how best to prompt schemas in favor of a built-in model feature. It can also
more naturally support agentic flows, since you can just pass multiple tool schemas instead
of fiddling with enums or unions.
```python
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI
Many LLM providers, including [Anthropic](https://www.anthropic.com/),
[Cohere](https://cohere.com/), [Google](https://cloud.google.com/vertex-ai),
[Mistral](https://mistral.ai/), [OpenAI](https://openai.com/), and others,
support variants of a tool calling feature. These features typically allow requests
to the LLM to include available tools and their schemas, and for responses to include
calls to these tools. For instance, given a search engine tool, an LLM might handle a
query by first issuing a call to the search engine. The system calling the LLM can
receive the tool call, execute it, and return the output to the LLM to inform its
response. LangChain includes a suite of [built-in tools](/docs/integrations/tools/)
and supports several methods for defining your own [custom tools](/docs/how_to/custom_tools).
class ResponseFormatter(BaseModel):
"""Always use this tool to structure your response to the user."""
LangChain provides a standardized interface for tool calling that is consistent across different models.
    answer: str = Field(description="The answer to the user's question")
    followup_question: str = Field(description="A followup question the user could ask")
The standard interface consists of:
model = ChatOpenAI(
model="gpt-4o",
temperature=0,
)
* `ChatModel.bind_tools()`: a method for specifying which tools are available for a model to call. This method accepts [LangChain tools](/docs/concepts/#tools).
* `AIMessage.tool_calls`: an attribute on the `AIMessage` returned from the model for accessing the tool calls requested by the model.
model_with_tools = model.bind_tools([ResponseFormatter])
ai_msg = model_with_tools.invoke("What is the powerhouse of the cell?")
ai_msg.tool_calls[0]["args"]
```
```
{'answer': "The powerhouse of the cell is the mitochondrion. It generates most of the cell's supply of adenosine triphosphate (ATP), which is used as a source of chemical energy.",
'followup_question': 'How do mitochondria generate ATP?'}
```
Tool calling is a generally consistent way to get a model to generate structured output, and is the default technique
used for the [`.with_structured_output()`](/docs/concepts/#with_structured_output) method when a model supports it.
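As a minimal sketch, reusing the `ResponseFormatter` schema and OpenAI model from the example above (other providers that support tool calling work the same way):

```python
# .with_structured_output() binds the schema as a tool under the hood and
# parses the model's tool call back into a ResponseFormatter instance.
structured_model = model.with_structured_output(ResponseFormatter)
structured_output = structured_model.invoke("What is the powerhouse of the cell?")
structured_output.answer
```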
The following how-to guides are good practical resources for using function/tool calling for structured output:
The following how-to guides are good practical resources for using function/tool calling:
- [How to return structured data from an LLM](/docs/how_to/structured_output/)
- [How to use a model to call tools](/docs/how_to/tool_calling)
@@ -1175,7 +953,7 @@ See our [blog post overview](https://blog.langchain.dev/query-construction/) and
#### Indexing
Fourth, consider the design of your document index. A simple and powerful idea is to **decouple the documents that you index for retrieval from the documents that you pass to the LLM for generation.** Indexing frequently uses embedding models with vector stores, which [compress the semantic information in documents to fixed-size vectors](/docs/concepts/#embedding-models).
Fourth, consider the design of your document index. A simple and powerful idea is to **decouple the documents that you index for retrieval from the documents that you pass to the LLM for generation.** Indexing frequently uses embedding models with vector stores, which [compress the semantic information in documents to fixed-size vectors](/docs/concepts/#embedding-models).
Many RAG approaches focus on splitting documents into chunks and retrieving some number based on similarity to an input question for the LLM. But chunk size and chunk number can be difficult to set and affect results if they do not provide full context for the LLM to answer a question. Furthermore, LLMs are increasingly capable of processing millions of tokens.
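As a rough sketch of this decoupling, you might index short summaries for retrieval while returning the full documents for generation. The summaries, embedding model, and FAISS vector store below are illustrative stand-ins (this assumes `faiss-cpu` and `langchain-openai` are installed):

```python
import uuid

from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryStore
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings

full_docs = [Document(page_content="<a long source document>")]
summaries = ["<a short LLM-written summary of that document>"]

doc_ids = [str(uuid.uuid4()) for _ in full_docs]
summary_docs = [
    Document(page_content=summary, metadata={"doc_id": doc_ids[i]})
    for i, summary in enumerate(summaries)
]

# Index only the compact summaries for similarity search...
vectorstore = FAISS.from_documents(summary_docs, OpenAIEmbeddings())
# ...but store the full documents to hand to the LLM at generation time.
docstore = InMemoryStore()
docstore.mset(list(zip(doc_ids, full_docs)))

retriever = MultiVectorRetriever(
    vectorstore=vectorstore, docstore=docstore, id_key="doc_id"
)
retriever.invoke("a question about the document")  # returns the full document
```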
@@ -1255,7 +1033,7 @@ See several videos and cookbooks showcasing RAG with LangGraph:
- [Cookbooks for RAG using LangGraph](https://github.com/langchain-ai/langgraph/tree/main/examples/rag)
See our LangGraph RAG recipes with partners:
- [Meta](https://github.com/meta-llama/llama-recipes/tree/main/recipes/3p_integrations/langchain)
- [Meta](https://github.com/meta-llama/llama-recipes/tree/main/recipes/use_cases/agents/langchain)
- [Mistral](https://github.com/mistralai/cookbook/tree/main/third_party/langchain)
:::
@@ -1283,7 +1061,7 @@ Table columns:
| Token | [many classes](/docs/how_to/split_by_token/) | Tokens | | Splits text on tokens. There exist a few different ways to measure tokens. |
| Character | [CharacterTextSplitter](/docs/how_to/character_text_splitter/) | A user defined character | | Splits text based on a user defined character. One of the simpler methods. |
| Semantic Chunker (Experimental) | [SemanticChunker](/docs/how_to/semantic-chunker/) | Sentences | | First splits on sentences. Then combines ones next to each other if they are semantically similar enough. Taken from [Greg Kamradt](https://github.com/FullStackRetrieval-com/RetrievalTutorials/blob/main/tutorials/LevelsOfTextSplitting/5_Levels_Of_Text_Splitting.ipynb) |
| Integration: AI21 Semantic | [AI21SemanticTextSplitter](/docs/integrations/document_transformers/ai21_semantic_text_splitter/) | | ✅ | Identifies distinct topics that form coherent pieces of text and splits along those. |
| Integration: AI21 Semantic | [AI21SemanticTextSplitter](/docs/integrations/document_transformers/ai21_semantic_text_splitter/) | ✅ | Identifies distinct topics that form coherent pieces of text and splits along those. |
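For example, here is a minimal sketch of the recursive character splitter from the table above (the chunk sizes are illustrative and should be tuned for your documents):

```python
from langchain_text_splitters import RecursiveCharacterTextSplitter

# Illustrative settings only; tune chunk_size and chunk_overlap per use case.
splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
chunks = splitter.split_text("Some long document text ..." * 100)
```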
### Evaluation
<span data-heading-keywords="evaluation,evaluate"></span>
@@ -1301,13 +1079,3 @@ This process is vital for building reliable applications.
- It allows you to track results over time and automatically run your evaluators on a schedule or as part of CI/Code
To learn more, check out [this LangSmith guide](https://docs.smith.langchain.com/concepts/evaluation).
### Tracing
<span data-heading-keywords="trace,tracing"></span>
A trace is essentially a series of steps that your application takes to go from input to output.
Traces contain individual steps called `runs`. These can be individual calls from a model, retriever,
tool, or sub-chains.
Tracing gives you observability inside your chains and agents, and is vital in diagnosing issues.
For a deeper dive, check out [this LangSmith conceptual guide](https://docs.smith.langchain.com/concepts/tracing).
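As a minimal sketch, tracing to LangSmith is typically enabled through environment variables (the project name below is an illustrative placeholder):

```python
import os

# Assumes you have a LangSmith account and API key; runs from your chains,
# models, retrievers, and tools will then be traced to the named project.
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_API_KEY"] = "<your-langsmith-api-key>"
os.environ["LANGCHAIN_PROJECT"] = "my-project"  # optional
```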

View File

@@ -13,15 +13,15 @@ organization and structure.
LangChain's documentation follows the [Diataxis framework](https://diataxis.fr).
Under this framework, all documentation falls under one of four categories: [Tutorials](/docs/contributing/documentation/style_guide/#tutorials),
[How-to guides](/docs/contributing/documentation/style_guide/#how-to-guides),
[References](/docs/contributing/documentation/style_guide/#references), and [Explanations](/docs/contributing/documentation/style_guide/#conceptual-guide).
[References](/docs/contributing/documentation/style_guide/#references), and [Explanations](/docs/contributing/documentation/style_guide/#conceptual-guide).
### Tutorials
Tutorials are lessons that take the reader through a practical activity. Their purpose is to help the user
gain understanding of concepts and how they interact by showing one way to achieve some goal in a hands-on way. They should **avoid** giving
multiple permutations of ways to achieve that goal in-depth. Instead, it should guide a new user through a recommended path to accomplishing the tutorial's goal. While the end result of a tutorial does not necessarily need to
be completely production-ready, it should be useful and practically satisfy the goal that you clearly stated in the tutorial's introduction. Information on how to address additional scenarios
belongs in how-to guides.
gain understanding of concepts and how they interact by showing one way to achieve some goal in a hands-on way. They should not cover
multiple permutations of ways to achieve that goal in-depth, and the end result of a tutorial does not need to
be completely production-ready against all cases. Information on how to address additional scenarios
can occur in how-to guides.
To quote the Diataxis website:
@@ -33,22 +33,20 @@ Some examples include:
- [Build a Simple LLM Application with LCEL](/docs/tutorials/llm_chain/)
- [Build a Retrieval Augmented Generation (RAG) App](/docs/tutorials/rag/)
A good structural rule of thumb is to follow the structure of this [example from Numpy](https://numpy.org/numpy-tutorials/content/tutorial-svd.html).
Here are some high-level tips on writing a good tutorial:
- Focus on guiding the user to get something done, but keep in mind the end-goal is more to impart principles than to create a perfect production system.
- Be specific, not abstract and follow one path.
- No need to go deeply into alternative approaches, but its ok to reference them, ideally with a link to an appropriate how-to guide.
- Get "a point on the board" as soon as possible - something the user can run that outputs something.
- You can iterate and expand afterwards.
- Try to frequently checkpoint at given steps where the user can run code and see progress.
- Focus on results, not technical explanation.
- Crosslink heavily to appropriate conceptual/reference pages.
- The first time you mention a LangChain concept, use its full name (e.g. "LangChain Expression Language (LCEL)"), and link to its conceptual/other documentation page.
- It's also helpful to add a prerequisite callout that links to any pages with necessary background information.
- End with a recap/next steps section summarizing what the tutorial covered and future reading, such as related how-to guides.
- Focus on guiding the user to get something done, but keep in mind the end-goal is more to impart principles than to create a perfect production system
- Be specific, not abstract and follow one path
- No need to go deeply into alternative approaches, but its ok to reference them, ideally with a link to an appropriate how-to guide
- Get "a point on the board" as soon as possible - something the user can run that outputs something
- You can iterate and expand afterwards
- Try to frequently checkpoint at given steps where the user can run code and see progress
- Focus on results, not technical explanation
- Crosslink heavily to appropriate conceptual/reference pages
- The first time you mention a LangChain concept, use its full name (e.g. "LangChain Expression Language (LCEL)"), and link to its conceptual/other documentation page
- It's also helpful to add a prerequisite callout that links to any pages with necessary background information
- End with a recap/next steps section summarizing what the tutorial covered and future reading, such as related how-to guides
### How-to guides
@@ -68,21 +66,20 @@ Some examples include:
Here are some high-level tips on writing a good how-to guide:
- Clearly explain what you are guiding the user through at the start.
- Assume higher intent than a tutorial and show what the user needs to do to get that task done.
- Assume familiarity with concepts, but explain why suggested actions are helpful.
- Crosslink heavily to conceptual/reference pages.
- Discuss alternatives and responses to real-world tradeoffs that may arise when solving a problem.
- Use lots of example code.
- Prefer full code blocks that the reader can copy and run.
- End with a recap/next steps section summarizing what the tutorial covered and future reading, such as other related how-to guides.
- Clearly explain what you are guiding the user through at the start
- Assume higher intent than a tutorial and show what the user needs to do to get that task done
- Assume familiarity with concepts, but explain why suggested actions are helpful
- Crosslink heavily to conceptual/reference pages
- Discuss alternatives and responses to real-world tradeoffs that may arise when solving a problem
- Use lots of example code
- End with a recap/next steps section summarizing what the tutorial covered and future reading, such as other related how-to guides
### Conceptual guide
LangChain's conceptual guide falls under the **Explanation** quadrant of Diataxis. They should cover LangChain terms and concepts
in a more abstract way than how-to guides or tutorials, and should be geared towards curious users interested in
gaining a deeper understanding of the framework. Try to avoid excessively large code examples - the goal here is to
impart perspective to the user rather than to finish a practical project. These guides should cover **why** things work the way they do.
gaining a deeper understanding of the framework. There should be few, if any, concrete code examples. The goal here is to
impart perspective to the user rather than to finish a practical project.
This guide on documentation style is meant to fall under this category.

View File

@@ -12,7 +12,7 @@ As an open-source project in a rapidly developing field, we are extremely open t
There are many ways to contribute to LangChain. Here are some common ways people contribute:
- [**Documentation**](/docs/contributing/documentation/): Help improve our docs, including this one!
- [**Documentation**](/docs/contributing/documentation/style_guide): Help improve our docs, including this one!
- [**Code**](/docs/contributing/code/): Help us write code, fix bugs, or improve our infrastructure.
- [**Integrations**](integrations.mdx): Help us integrate with your favorite vendors and tools.
- [**Discussions**](https://github.com/langchain-ai/langchain/discussions): Help answer usage questions and discuss issues with users.

View File

@@ -11,7 +11,7 @@ There are a few different places you can contribute integrations for LangChain:
- **Community**: For lighter-weight integrations that are primarily maintained by LangChain and the Open Source Community.
- **Partner Packages**: For independent packages that are co-maintained by LangChain and a partner.
For the most part, **new integrations should be added to the Community package**. Partner packages require more maintenance as separate packages, so please confirm with the LangChain team before creating a new partner package.
For the most part, new integrations should be added to the Community package. Partner packages require more maintenance as separate packages, so please confirm with the LangChain team before creating a new partner package.
In the following sections, we'll walk through how to contribute to each of these packages from a fake company, `Parrot Link AI`.
@@ -60,10 +60,6 @@ And add documentation to:
## Partner package in LangChain repo
:::caution
Before starting a **partner** package, please confirm your intent with the LangChain team. Partner packages require more maintenance as separate packages, so we will close PRs that add new partner packages without prior discussion. See the above section for how to add a community integration.
:::
Partner packages can be hosted in the `LangChain` monorepo or in an external repo.
Partner package in the `LangChain` repo is placed in `libs/partners/{partner}`

View File

@@ -7,7 +7,6 @@ If you plan on contributing to LangChain code or documentation, it can be useful
to understand the high level structure of the repository.
LangChain is organized as a [monorepo](https://en.wikipedia.org/wiki/Monorepo) that contains multiple packages.
You can check out our [installation guide](/docs/how_to/installation/) for more on how they fit together.
Here's the structure visualized as a tree:

Binary file not shown.

View File

@@ -153,7 +153,7 @@
"\n",
" def parse(self, text: str) -> List[str]:\n",
" lines = text.strip().split(\"\\n\")\n",
" return list(filter(None, lines)) # Remove empty lines\n",
" return lines\n",
"\n",
"\n",
"output_parser = LineListOutputParser()\n",

View File

@@ -1,342 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to dispatch custom callback events\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Callbacks](/docs/concepts/#callbacks)\n",
"- [Custom callback handlers](/docs/how_to/custom_callbacks)\n",
"- [Astream Events API](/docs/concepts/#astream_events) the `astream_events` method will surface custom callback events.\n",
":::\n",
"\n",
"In some situations, you may want to dipsatch a custom callback event from within a [Runnable](/docs/concepts/#runnable-interface) so it can be surfaced\n",
"in a custom callback handler or via the [Astream Events API](/docs/concepts/#astream_events).\n",
"\n",
"For example, if you have a long running tool with multiple steps, you can dispatch custom events between the steps and use these custom events to monitor progress.\n",
"You could also surface these custom events to an end user of your application to show them how the current task is progressing.\n",
"\n",
"To dispatch a custom event you need to decide on two attributes for the event: the `name` and the `data`.\n",
"\n",
"| Attribute | Type | Description |\n",
"|-----------|------|----------------------------------------------------------------------------------------------------------|\n",
"| name | str | A user defined name for the event. |\n",
"| data | Any | The data associated with the event. This can be anything, though we suggest making it JSON serializable. |\n",
"\n",
"\n",
":::{.callout-important}\n",
"* Dispatching custom callback events requires `langchain-core>=0.2.15`.\n",
"* Custom callback events can only be dispatched from within an existing `Runnable`.\n",
"* If using `astream_events`, you must use `version='v2'` to see custom events.\n",
"* Sending or rendering custom callbacks events in LangSmith is not yet supported.\n",
":::\n",
"\n",
"\n",
":::caution COMPATIBILITY\n",
"LangChain cannot automatically propagate configuration, including callbacks necessary for astream_events(), to child runnables if you are running async code in python<=3.10. This is a common reason why you may fail to see events being emitted from custom runnables or tools.\n",
"\n",
"If you are running python<=3.10, you will need to manually propagate the `RunnableConfig` object to the child runnable in async environments. For an example of how to manually propagate the config, see the implementation of the `bar` RunnableLambda below.\n",
"\n",
"If you are running python>=3.11, the `RunnableConfig` will automatically propagate to child runnables in async environment. However, it is still a good idea to propagate the `RunnableConfig` manually if your code may run in other Python versions.\n",
":::"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"%pip install -qU langchain-core"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Astream Events API\n",
"\n",
"The most useful way to consume custom events is via the [Astream Events API](/docs/concepts/#astream_events).\n",
"\n",
"We can use the `async` `adispatch_custom_event` API to emit custom events in an async setting. \n",
"\n",
"\n",
":::{.callout-important}\n",
"\n",
"To see custom events via the astream events API, you need to use the newer `v2` API of `astream_events`.\n",
":::"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'event': 'on_chain_start', 'data': {'input': 'hello world'}, 'name': 'foo', 'tags': [], 'run_id': 'f354ffe8-4c22-4881-890a-c1cad038a9a6', 'metadata': {}, 'parent_ids': []}\n",
"{'event': 'on_custom_event', 'run_id': 'f354ffe8-4c22-4881-890a-c1cad038a9a6', 'name': 'event1', 'tags': [], 'metadata': {}, 'data': {'x': 'hello world'}, 'parent_ids': []}\n",
"{'event': 'on_custom_event', 'run_id': 'f354ffe8-4c22-4881-890a-c1cad038a9a6', 'name': 'event2', 'tags': [], 'metadata': {}, 'data': 5, 'parent_ids': []}\n",
"{'event': 'on_chain_stream', 'run_id': 'f354ffe8-4c22-4881-890a-c1cad038a9a6', 'name': 'foo', 'tags': [], 'metadata': {}, 'data': {'chunk': 'hello world'}, 'parent_ids': []}\n",
"{'event': 'on_chain_end', 'data': {'output': 'hello world'}, 'run_id': 'f354ffe8-4c22-4881-890a-c1cad038a9a6', 'name': 'foo', 'tags': [], 'metadata': {}, 'parent_ids': []}\n"
]
}
],
"source": [
"from langchain_core.callbacks.manager import (\n",
" adispatch_custom_event,\n",
")\n",
"from langchain_core.runnables import RunnableLambda\n",
"from langchain_core.runnables.config import RunnableConfig\n",
"\n",
"\n",
"@RunnableLambda\n",
"async def foo(x: str) -> str:\n",
" await adispatch_custom_event(\"event1\", {\"x\": x})\n",
" await adispatch_custom_event(\"event2\", 5)\n",
" return x\n",
"\n",
"\n",
"async for event in foo.astream_events(\"hello world\", version=\"v2\"):\n",
" print(event)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"In python <= 3.10, you must propagate the config manually!"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'event': 'on_chain_start', 'data': {'input': 'hello world'}, 'name': 'bar', 'tags': [], 'run_id': 'c787b09d-698a-41b9-8290-92aaa656f3e7', 'metadata': {}, 'parent_ids': []}\n",
"{'event': 'on_custom_event', 'run_id': 'c787b09d-698a-41b9-8290-92aaa656f3e7', 'name': 'event1', 'tags': [], 'metadata': {}, 'data': {'x': 'hello world'}, 'parent_ids': []}\n",
"{'event': 'on_custom_event', 'run_id': 'c787b09d-698a-41b9-8290-92aaa656f3e7', 'name': 'event2', 'tags': [], 'metadata': {}, 'data': 5, 'parent_ids': []}\n",
"{'event': 'on_chain_stream', 'run_id': 'c787b09d-698a-41b9-8290-92aaa656f3e7', 'name': 'bar', 'tags': [], 'metadata': {}, 'data': {'chunk': 'hello world'}, 'parent_ids': []}\n",
"{'event': 'on_chain_end', 'data': {'output': 'hello world'}, 'run_id': 'c787b09d-698a-41b9-8290-92aaa656f3e7', 'name': 'bar', 'tags': [], 'metadata': {}, 'parent_ids': []}\n"
]
}
],
"source": [
"from langchain_core.callbacks.manager import (\n",
" adispatch_custom_event,\n",
")\n",
"from langchain_core.runnables import RunnableLambda\n",
"from langchain_core.runnables.config import RunnableConfig\n",
"\n",
"\n",
"@RunnableLambda\n",
"async def bar(x: str, config: RunnableConfig) -> str:\n",
" \"\"\"An example that shows how to manually propagate config.\n",
"\n",
" You must do this if you're running python<=3.10.\n",
" \"\"\"\n",
" await adispatch_custom_event(\"event1\", {\"x\": x}, config=config)\n",
" await adispatch_custom_event(\"event2\", 5, config=config)\n",
" return x\n",
"\n",
"\n",
"async for event in bar.astream_events(\"hello world\", version=\"v2\"):\n",
" print(event)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Async Callback Handler\n",
"\n",
"You can also consume the dispatched event via an async callback handler."
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Received event event1 with data: {'x': 1}, with tags: ['foo', 'bar'], with metadata: {} and run_id: a62b84be-7afd-4829-9947-7165df1f37d9\n",
"Received event event2 with data: 5, with tags: ['foo', 'bar'], with metadata: {} and run_id: a62b84be-7afd-4829-9947-7165df1f37d9\n"
]
},
{
"data": {
"text/plain": [
"1"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from typing import Any, Dict, List, Optional\n",
"from uuid import UUID\n",
"\n",
"from langchain_core.callbacks import AsyncCallbackHandler\n",
"from langchain_core.callbacks.manager import (\n",
" adispatch_custom_event,\n",
")\n",
"from langchain_core.runnables import RunnableLambda\n",
"from langchain_core.runnables.config import RunnableConfig\n",
"\n",
"\n",
"class AsyncCustomCallbackHandler(AsyncCallbackHandler):\n",
" async def on_custom_event(\n",
" self,\n",
" name: str,\n",
" data: Any,\n",
" *,\n",
" run_id: UUID,\n",
" tags: Optional[List[str]] = None,\n",
" metadata: Optional[Dict[str, Any]] = None,\n",
" **kwargs: Any,\n",
" ) -> None:\n",
" print(\n",
" f\"Received event {name} with data: {data}, with tags: {tags}, with metadata: {metadata} and run_id: {run_id}\"\n",
" )\n",
"\n",
"\n",
"@RunnableLambda\n",
"async def bar(x: str, config: RunnableConfig) -> str:\n",
" \"\"\"An example that shows how to manually propagate config.\n",
"\n",
" You must do this if you're running python<=3.10.\n",
" \"\"\"\n",
" await adispatch_custom_event(\"event1\", {\"x\": x}, config=config)\n",
" await adispatch_custom_event(\"event2\", 5, config=config)\n",
" return x\n",
"\n",
"\n",
"async_handler = AsyncCustomCallbackHandler()\n",
"await foo.ainvoke(1, {\"callbacks\": [async_handler], \"tags\": [\"foo\", \"bar\"]})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Sync Callback Handler\n",
"\n",
"Let's see how to emit custom events in a sync environment using `dispatch_custom_event`.\n",
"\n",
"You **must** call `dispatch_custom_event` from within an existing `Runnable`."
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Received event event1 with data: {'x': 1}, with tags: ['foo', 'bar'], with metadata: {} and run_id: 27b5ce33-dc26-4b34-92dd-08a89cb22268\n",
"Received event event2 with data: {'x': 1}, with tags: ['foo', 'bar'], with metadata: {} and run_id: 27b5ce33-dc26-4b34-92dd-08a89cb22268\n"
]
},
{
"data": {
"text/plain": [
"1"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from typing import Any, Dict, List, Optional\n",
"from uuid import UUID\n",
"\n",
"from langchain_core.callbacks import BaseCallbackHandler\n",
"from langchain_core.callbacks.manager import (\n",
" dispatch_custom_event,\n",
")\n",
"from langchain_core.runnables import RunnableLambda\n",
"from langchain_core.runnables.config import RunnableConfig\n",
"\n",
"\n",
"class CustomHandler(BaseCallbackHandler):\n",
" def on_custom_event(\n",
" self,\n",
" name: str,\n",
" data: Any,\n",
" *,\n",
" run_id: UUID,\n",
" tags: Optional[List[str]] = None,\n",
" metadata: Optional[Dict[str, Any]] = None,\n",
" **kwargs: Any,\n",
" ) -> None:\n",
" print(\n",
" f\"Received event {name} with data: {data}, with tags: {tags}, with metadata: {metadata} and run_id: {run_id}\"\n",
" )\n",
"\n",
"\n",
"@RunnableLambda\n",
"def foo(x: int, config: RunnableConfig) -> int:\n",
" dispatch_custom_event(\"event1\", {\"x\": x})\n",
" dispatch_custom_event(\"event2\", {\"x\": x})\n",
" return x\n",
"\n",
"\n",
"handler = CustomHandler()\n",
"foo.invoke(1, {\"callbacks\": [handler], \"tags\": [\"foo\", \"bar\"]})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Next steps\n",
"\n",
"You've seen how to emit custom events, you can check out the more in depth guide for [astream events](/docs/how_to/streaming/#using-stream-events) which is the easiest way to leverage custom events."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@@ -63,7 +63,7 @@
"outputs": [],
"source": [
"# <!-- ruff: noqa: F821 -->\n",
"from langchain_core.globals import set_llm_cache"
"from langchain.globals import set_llm_cache"
]
},
{
@@ -103,7 +103,7 @@
],
"source": [
"%%time\n",
"from langchain_core.caches import InMemoryCache\n",
"from langchain.cache import InMemoryCache\n",
"\n",
"set_llm_cache(InMemoryCache())\n",
"\n",

View File

@@ -1,146 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "dcf87b32",
"metadata": {},
"source": [
"# How to handle rate limits\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"- [Chat models](/docs/concepts/#chat-models)\n",
"- [LLMs](/docs/concepts/#llms)\n",
":::\n",
"\n",
"\n",
"You may find yourself in a situation where you are getting rate limited by the model provider API because you're making too many requests.\n",
"\n",
"For example, this might happen if you are running many parallel queries to benchmark the chat model on a test dataset.\n",
"\n",
"If you are facing such a situation, you can use a rate limiter to help match the rate at which you're making request to the rate allowed\n",
"by the API.\n",
"\n",
":::info Requires ``langchain-core >= 0.2.24``\n",
"\n",
"This functionality was added in ``langchain-core == 0.2.24``. Please make sure your package is up to date.\n",
":::"
]
},
{
"cell_type": "markdown",
"id": "cbc3c873-6109-4e03-b775-b73c1003faea",
"metadata": {},
"source": [
"## Initialize a rate limiter\n",
"\n",
"Langchain comes with a built-in in memory rate limiter. This rate limiter is thread safe and can be shared by multiple threads in the same process.\n",
"\n",
"The provided rate limiter can only limit the number of requests per unit time. It will not help if you need to also limited based on the size\n",
"of the requests."
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "aa9c3c8c-0464-4190-a8c5-d69d173505a6",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.rate_limiters import InMemoryRateLimiter\n",
"\n",
"rate_limiter = InMemoryRateLimiter(\n",
" requests_per_second=0.1, # <-- Super slow! We can only make a request once every 10 seconds!!\n",
" check_every_n_seconds=0.1, # Wake up every 100 ms to check whether allowed to make a request,\n",
" max_bucket_size=10, # Controls the maximum burst size.\n",
")"
]
},
{
"cell_type": "markdown",
"id": "8e058bde-9413-4b08-8cc6-0c9cb638f19f",
"metadata": {},
"source": [
"## Choose a model\n",
"\n",
"Choose any model and pass to it the rate_limiter via the `rate_limiter` attribute."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "0f880a3a-c047-4e94-a323-fff2a4c0e96d",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import time\n",
"from getpass import getpass\n",
"\n",
"if \"ANTHROPIC_API_KEY\" not in os.environ:\n",
" os.environ[\"ANTHROPIC_API_KEY\"] = getpass()\n",
"\n",
"\n",
"from langchain_anthropic import ChatAnthropic\n",
"\n",
"model = ChatAnthropic(model_name=\"claude-3-opus-20240229\", rate_limiter=rate_limiter)"
]
},
{
"cell_type": "markdown",
"id": "80c9ab3a-299a-460f-985c-90280a046f52",
"metadata": {},
"source": [
"Let's confirm that the rate limiter works. We should only be able to invoke the model once per 10 seconds."
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "d074265c-9f32-4c5f-b914-944148993c4d",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"11.599073648452759\n",
"10.7502121925354\n",
"10.244257926940918\n",
"8.83088755607605\n",
"11.645203590393066\n"
]
}
],
"source": [
"for _ in range(5):\n",
" tic = time.time()\n",
" model.invoke(\"hello\")\n",
" toc = time.time()\n",
" print(toc - tic)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -15,12 +15,6 @@
"\n",
"Make sure you have the integration packages installed for any model providers you want to support. E.g. you should have `langchain-openai` installed to init an OpenAI model.\n",
"\n",
":::\n",
"\n",
":::info Requires ``langchain >= 0.2.8``\n",
"\n",
"This functionality was added in ``langchain-core == 0.2.8``. Please make sure your package is up to date.\n",
"\n",
":::"
]
},
@@ -31,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain>=0.2.8 langchain-openai langchain-anthropic langchain-google-vertexai"
"%pip install -qU langchain langchain-openai langchain-anthropic langchain-google-vertexai"
]
},
{
@@ -82,6 +76,32 @@
"print(\"Gemini 1.5: \" + gemini_15.invoke(\"what's your name\").content + \"\\n\")"
]
},
{
"cell_type": "markdown",
"id": "fff9a4c8-b6ee-4a1a-8d3d-0ecaa312d4ed",
"metadata": {},
"source": [
"## Simple config example"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "75c25d39-bf47-4b51-a6c6-64d9c572bfd6",
"metadata": {},
"outputs": [],
"source": [
"user_config = {\n",
" \"model\": \"...user-specified...\",\n",
" \"model_provider\": \"...user-specified...\",\n",
" \"temperature\": 0,\n",
" \"max_tokens\": 1000,\n",
"}\n",
"\n",
"llm = init_chat_model(**user_config)\n",
"llm.invoke(\"what's your name\")"
]
},
{
"cell_type": "markdown",
"id": "f811f219-5e78-4b62-b495-915d52a22532",
@@ -104,216 +124,13 @@
"gemini_15 = init_chat_model(\"gemini-1.5-pro\", temperature=0)"
]
},
{
"cell_type": "markdown",
"id": "476a44db-c50d-4846-951d-0f1c9ba8bbaa",
"metadata": {},
"source": [
"## Creating a configurable model\n",
"\n",
"You can also create a runtime-configurable model by specifying `configurable_fields`. If you don't specify a `model` value, then \"model\" and \"model_provider\" be configurable by default."
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "6c037f27-12d7-4e83-811e-4245c0e3ba58",
"execution_count": null,
"id": "da07b5c0-d2e6-42e4-bfcd-2efcfaae6221",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"I'm an AI language model created by OpenAI, and I don't have a personal name. You can call me Assistant or any other name you prefer! How can I assist you today?\", response_metadata={'token_usage': {'completion_tokens': 37, 'prompt_tokens': 11, 'total_tokens': 48}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_d576307f90', 'finish_reason': 'stop', 'logprobs': None}, id='run-5428ab5c-b5c0-46de-9946-5d4ca40dbdc8-0', usage_metadata={'input_tokens': 11, 'output_tokens': 37, 'total_tokens': 48})"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"configurable_model = init_chat_model(temperature=0)\n",
"\n",
"configurable_model.invoke(\n",
" \"what's your name\", config={\"configurable\": {\"model\": \"gpt-4o\"}}\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "321e3036-abd2-4e1f-bcc6-606efd036954",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"My name is Claude. It's nice to meet you!\", response_metadata={'id': 'msg_012XvotUJ3kGLXJUWKBVxJUi', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 11, 'output_tokens': 15}}, id='run-1ad1eefe-f1c6-4244-8bc6-90e2cb7ee554-0', usage_metadata={'input_tokens': 11, 'output_tokens': 15, 'total_tokens': 26})"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"configurable_model.invoke(\n",
" \"what's your name\", config={\"configurable\": {\"model\": \"claude-3-5-sonnet-20240620\"}}\n",
")"
]
},
{
"cell_type": "markdown",
"id": "7f3b3d4a-4066-45e4-8297-ea81ac8e70b7",
"metadata": {},
"source": [
"### Configurable model with default values\n",
"\n",
"We can create a configurable model with default model values, specify which parameters are configurable, and add prefixes to configurable params:"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "814a2289-d0db-401e-b555-d5116112b413",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"I'm an AI language model created by OpenAI, and I don't have a personal name. You can call me Assistant or any other name you prefer! How can I assist you today?\", response_metadata={'token_usage': {'completion_tokens': 37, 'prompt_tokens': 11, 'total_tokens': 48}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_ce0793330f', 'finish_reason': 'stop', 'logprobs': None}, id='run-3923e328-7715-4cd6-b215-98e4b6bf7c9d-0', usage_metadata={'input_tokens': 11, 'output_tokens': 37, 'total_tokens': 48})"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"first_llm = init_chat_model(\n",
" model=\"gpt-4o\",\n",
" temperature=0,\n",
" configurable_fields=(\"model\", \"model_provider\", \"temperature\", \"max_tokens\"),\n",
" config_prefix=\"first\", # useful when you have a chain with multiple models\n",
")\n",
"\n",
"first_llm.invoke(\"what's your name\")"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "6c8755ba-c001-4f5a-a497-be3f1db83244",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"My name is Claude. It's nice to meet you!\", response_metadata={'id': 'msg_01RyYR64DoMPNCfHeNnroMXm', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 11, 'output_tokens': 15}}, id='run-22446159-3723-43e6-88df-b84797e7751d-0', usage_metadata={'input_tokens': 11, 'output_tokens': 15, 'total_tokens': 26})"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"first_llm.invoke(\n",
" \"what's your name\",\n",
" config={\n",
" \"configurable\": {\n",
" \"first_model\": \"claude-3-5-sonnet-20240620\",\n",
" \"first_temperature\": 0.5,\n",
" \"first_max_tokens\": 100,\n",
" }\n",
" },\n",
")"
]
},
{
"cell_type": "markdown",
"id": "0072b1a3-7e44-4b4e-8b07-efe1ba91a689",
"metadata": {},
"source": [
"### Using a configurable model declaratively\n",
"\n",
"We can call declarative operations like `bind_tools`, `with_structured_output`, `with_configurable`, etc. on a configurable model and chain a configurable model in the same way that we would a regularly instantiated chat model object."
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "067dabee-1050-4110-ae24-c48eba01e13b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'GetPopulation',\n",
" 'args': {'location': 'Los Angeles, CA'},\n",
" 'id': 'call_sYT3PFMufHGWJD32Hi2CTNUP'},\n",
" {'name': 'GetPopulation',\n",
" 'args': {'location': 'New York, NY'},\n",
" 'id': 'call_j1qjhxRnD3ffQmRyqjlI1Lnk'}]"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"\n",
"\n",
"class GetWeather(BaseModel):\n",
" \"\"\"Get the current weather in a given location\"\"\"\n",
"\n",
" location: str = Field(..., description=\"The city and state, e.g. San Francisco, CA\")\n",
"\n",
"\n",
"class GetPopulation(BaseModel):\n",
" \"\"\"Get the current population in a given location\"\"\"\n",
"\n",
" location: str = Field(..., description=\"The city and state, e.g. San Francisco, CA\")\n",
"\n",
"\n",
"llm = init_chat_model(temperature=0)\n",
"llm_with_tools = llm.bind_tools([GetWeather, GetPopulation])\n",
"\n",
"llm_with_tools.invoke(\n",
" \"what's bigger in 2024 LA or NYC\", config={\"configurable\": {\"model\": \"gpt-4o\"}}\n",
").tool_calls"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "e57dfe9f-cd24-4e37-9ce9-ccf8daf78f89",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'GetPopulation',\n",
" 'args': {'location': 'Los Angeles, CA'},\n",
" 'id': 'toolu_01CxEHxKtVbLBrvzFS7GQ5xR'},\n",
" {'name': 'GetPopulation',\n",
" 'args': {'location': 'New York City, NY'},\n",
" 'id': 'toolu_013A79qt5toWSsKunFBDZd5S'}]"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm_with_tools.invoke(\n",
" \"what's bigger in 2024 LA or NYC\",\n",
" config={\"configurable\": {\"model\": \"claude-3-5-sonnet-20240620\"}},\n",
").tool_calls"
]
"outputs": [],
"source": []
}
],
"metadata": {
@@ -332,7 +149,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
"version": "3.9.1"
}
},
"nbformat": 4,

View File

@@ -16,7 +16,7 @@
"\n",
"Tracking token usage to calculate cost is an important part of putting your app in production. This guide goes over how to obtain this information from your LangChain model calls.\n",
"\n",
"This guide requires `langchain-openai >= 0.1.9`."
"This guide requires `langchain-openai >= 0.1.8`."
]
},
{
@@ -153,7 +153,7 @@
"\n",
"#### OpenAI\n",
"\n",
"For example, OpenAI will return a message [chunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html) at the end of a stream with token usage information. This behavior is supported by `langchain-openai >= 0.1.9` and can be enabled by setting `stream_usage=True`. This attribute can also be set when `ChatOpenAI` is instantiated.\n",
"For example, OpenAI will return a message [chunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html) at the end of a stream with token usage information. This behavior is supported by `langchain-openai >= 0.1.8` and can be enabled by setting `stream_options={\"include_usage\": True}`.\n",
"\n",
"```{=mdx}\n",
":::note\n",
@@ -172,18 +172,18 @@
"name": "stdout",
"output_type": "stream",
"text": [
"content='' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='Hello' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='!' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content=' How' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content=' can' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content=' I' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content=' assist' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content=' you' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content=' today' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='?' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='' response_metadata={'finish_reason': 'stop', 'model_name': 'gpt-3.5-turbo-0125'} id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623' usage_metadata={'input_tokens': 8, 'output_tokens': 9, 'total_tokens': 17}\n"
"content='' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content='Hello' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content='!' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content=' How' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content=' can' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content=' I' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content=' assist' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content=' you' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content=' today' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content='?' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content='' response_metadata={'finish_reason': 'stop'} id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content='' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf' usage_metadata={'input_tokens': 8, 'output_tokens': 9, 'total_tokens': 17}\n"
]
}
],
@@ -191,7 +191,7 @@
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\")\n",
"\n",
"aggregate = None\n",
"for chunk in llm.stream(\"hello\", stream_usage=True):\n",
"for chunk in llm.stream(\"hello\", stream_options={\"include_usage\": True}):\n",
" print(chunk)\n",
" aggregate = chunk if aggregate is None else aggregate + chunk"
]
@@ -229,7 +229,7 @@
"id": "7dba63e8-0ed7-4533-8f0f-78e19c38a25c",
"metadata": {},
"source": [
"To disable streaming token counts for OpenAI, set `stream_usage` to False, or omit it from the parameters:"
"To disable streaming token counts for OpenAI, set `\"include_usage\"` to False in `stream_options`, or omit it from the parameters:"
]
},
{
@@ -242,17 +242,17 @@
"name": "stdout",
"output_type": "stream",
"text": [
"content='' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content='Hello' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content='!' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content=' How' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content=' can' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content=' I' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content=' assist' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content=' you' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content=' today' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content='?' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content='' response_metadata={'finish_reason': 'stop', 'model_name': 'gpt-3.5-turbo-0125'} id='run-8e758550-94b0-4cca-a298-57482793c25d'\n"
"content='' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content='Hello' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content='!' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content=' How' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content=' can' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content=' I' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content=' assist' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content=' you' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content=' today' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content='?' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content='' response_metadata={'finish_reason': 'stop'} id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n"
]
}
],
@@ -267,7 +267,7 @@
"id": "6a5d9617-be3a-419a-9276-de9c29fa50ae",
"metadata": {},
"source": [
"You can also enable streaming token usage by setting `stream_usage` when instantiating the chat model. This can be useful when incorporating chat models into LangChain [chains](/docs/concepts#langchain-expression-language-lcel): usage metadata can be monitored when [streaming intermediate steps](/docs/how_to/streaming#using-stream-events) or using tracing software such as [LangSmith](https://docs.smith.langchain.com/).\n",
"You can also enable streaming token usage by setting `model_kwargs` when instantiating the chat model. This can be useful when incorporating chat models into LangChain [chains](/docs/concepts#langchain-expression-language-lcel): usage metadata can be monitored when [streaming intermediate steps](/docs/how_to/streaming#using-stream-events) or using tracing software such as [LangSmith](https://docs.smith.langchain.com/).\n",
"\n",
"See the below example, where we return output structured to a desired schema, but can still observe token usage streamed from intermediate steps."
]
@@ -275,7 +275,7 @@
{
"cell_type": "code",
"execution_count": 8,
"id": "0b1523d8-127e-4314-82fa-bd97aca37f9a",
"id": "57dec1fb-bd9c-4c98-8798-8fbbe67f6b2c",
"metadata": {},
"outputs": [
{
@@ -301,7 +301,7 @@
"\n",
"llm = ChatOpenAI(\n",
" model=\"gpt-3.5-turbo-0125\",\n",
" stream_usage=True,\n",
" model_kwargs={\"stream_options\": {\"include_usage\": True}},\n",
")\n",
"# Under the hood, .with_structured_output binds tools to the\n",
"# chat model and appends a parser.\n",
@@ -341,7 +341,7 @@
{
"cell_type": "code",
"execution_count": 9,
"id": "b04a4486-72fd-48ce-8f9e-5d281b441195",
"id": "31667d54",
"metadata": {},
"outputs": [
{
@@ -361,11 +361,7 @@
"\n",
"from langchain_community.callbacks.manager import get_openai_callback\n",
"\n",
"llm = ChatOpenAI(\n",
" model=\"gpt-3.5-turbo-0125\",\n",
" temperature=0,\n",
" stream_usage=True,\n",
")\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
"\n",
"with get_openai_callback() as cb:\n",
" result = llm.invoke(\"Tell me a joke\")\n",
@@ -383,14 +379,14 @@
{
"cell_type": "code",
"execution_count": 10,
"id": "05f22a1d-b021-490f-8840-f628a07459f2",
"id": "e09420f4",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"54\n"
"55\n"
]
}
],
@@ -401,29 +397,37 @@
" print(cb.total_tokens)"
]
},
{
"cell_type": "markdown",
"id": "9ac51188-c8f4-4230-90fd-3cd78cdd955d",
"metadata": {},
"source": [
"```{=mdx}\n",
":::note\n",
"Cost information is currently not available in streaming mode. This is because model names are currently not propagated through chunks in streaming mode, and the model name is used to look up the correct pricing. Token counts however are available:\n",
":::\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "c00c9158-7bb4-4279-88e6-ea70f46e6ac2",
"id": "b241069a-265d-4497-af34-b0a5f95ae67f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Tokens Used: 27\n",
"\tPrompt Tokens: 11\n",
"\tCompletion Tokens: 16\n",
"Successful Requests: 1\n",
"Total Cost (USD): $2.95e-05\n"
"28\n"
]
}
],
"source": [
"with get_openai_callback() as cb:\n",
" for chunk in llm.stream(\"Tell me a joke\"):\n",
" for chunk in llm.stream(\"Tell me a joke\", stream_options={\"include_usage\": True}):\n",
" pass\n",
" print(cb)"
" print(cb.total_tokens)"
]
},
{
@@ -453,7 +457,21 @@
")\n",
"tools = load_tools([\"wikipedia\"])\n",
"agent = create_tool_calling_agent(llm, tools, prompt)\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)"
"agent_executor = AgentExecutor(\n",
" agent=agent, tools=tools, verbose=True, stream_runnable=False\n",
")"
]
},
{
"cell_type": "markdown",
"id": "9c1ae74d-8300-4041-9ff4-66093ee592b1",
"metadata": {},
"source": [
"```{=mdx}\n",
":::note\n",
"We have to set `stream_runnable=False` for cost information, as described above. By default the AgentExecutor will stream the underlying agent so that you can get the most granular results when streaming events via AgentExecutor.stream_events.\n",
":::\n",
"```"
]
},
{
@@ -485,30 +503,36 @@
"\n",
"\n",
"\n",
"Page: Allen's hummingbird\n",
"Summary: Allen's hummingbird (Selasphorus sasin) is a species of hummingbird that breeds in the western United States. It is one of seven species in the genus Selasphorus.\u001b[0m\u001b[32;1m\u001b[1;3m\n",
"Page: Anna's hummingbird\n",
"Summary: Anna's hummingbird (Calypte anna) is a North American species of hummingbird. It was named after Anna Masséna, Duchess of Rivoli.\n",
"It is native to western coastal regions of North America. In the early 20th century, Anna's hummingbirds bred only in northern Baja California and Southern California. The transplanting of exotic ornamental plants in residential areas throughout the Pacific coast and inland deserts provided expanded nectar and nesting sites, allowing the species to expand its breeding range. Year-round residence of Anna's hummingbirds in the Pacific Northwest is an example of ecological release dependent on acclimation to colder winter temperatures, introduced plants, and human provision of nectar feeders during winter.\n",
"These birds feed on nectar from flowers using a long extendable tongue. They also consume small insects and other arthropods caught in flight or gleaned from vegetation.\u001b[0m\u001b[32;1m\u001b[1;3m\n",
"Invoking: `wikipedia` with `{'query': 'fastest bird species'}`\n",
"\n",
"\n",
"\u001b[0m\u001b[36;1m\u001b[1;3mPage: List of birds by flight speed\n",
"Summary: This is a list of the fastest flying birds in the world. A bird's velocity is necessarily variable; a hunting bird will reach much greater speeds while diving to catch prey than when flying horizontally. The bird that can achieve the greatest airspeed is the peregrine falcon (Falco peregrinus), able to exceed 320 km/h (200 mph) in its dives. A close relative of the common swift, the white-throated needletail (Hirundapus caudacutus), is commonly reported as the fastest bird in level flight with a reported top speed of 169 km/h (105 mph). This record remains unconfirmed as the measurement methods have never been published or verified. The record for the fastest confirmed level flight by a bird is 111.5 km/h (69.3 mph) held by the common swift.\n",
"\n",
"\n",
"\n",
"Page: Fastest animals\n",
"Summary: This is a list of the fastest animals in the world, by types of animal.\n",
"\n",
"\n",
"\n",
"Page: Falcon\n",
"Summary: Falcons () are birds of prey in the genus Falco, which includes about 40 species. Falcons are widely distributed on all continents of the world except Antarctica, though closely related raptors did occur there in the Eocene.\n",
"Adult falcons have thin, tapered wings, which enable them to fly at high speed and change direction rapidly. Fledgling falcons, in their first year of flying, have longer flight feathers, which make their configuration more like that of a general-purpose bird such as a broad wing. This makes flying easier while learning the exceptional skills required to be effective hunters as adults.\n",
"The falcons are the largest genus in the Falconinae subfamily of Falconidae, which itself also includes another subfamily comprising caracaras and a few other species. All these birds kill with their beaks, using a tomial \"tooth\" on the side of their beaks—unlike the hawks, eagles, and other birds of prey in the Accipitridae, which use their feet.\n",
"The largest falcon is the gyrfalcon at up to 65 cm in length. The smallest falcon species is the pygmy falcon, which measures just 20 cm. As with hawks and owls, falcons exhibit sexual dimorphism, with the females typically larger than the males, thus allowing a wider range of prey species.\n",
"Some small falcons with long, narrow wings are called \"hobbies\" and some which hover while hunting are called \"kestrels\".\n",
"As is the case with many birds of prey, falcons have exceptional powers of vision; the visual acuity of one species has been measured at 2.6 times that of a normal human. Peregrine falcons have been recorded diving at speeds of 320 km/h (200 mph), making them the fastest-moving creatures on Earth; the fastest recorded dive attained a vertical speed of 390 km/h (240 mph).\u001b[0m\u001b[32;1m\u001b[1;3mThe scientific name for a hummingbird is Trochilidae. The fastest bird species in level flight is the common swift, which holds the record for the fastest confirmed level flight by a bird at 111.5 km/h (69.3 mph). The peregrine falcon is known to exceed speeds of 320 km/h (200 mph) in its dives, making it the fastest bird in terms of diving speed.\u001b[0m\n",
"As is the case with many birds of prey, falcons have exceptional powers of vision; the visual acuity of one species has been measured at 2.6 times that of a normal human. Peregrine falcons have been recorded diving at speeds of 320 km/h (200 mph), making them the fastest-moving creatures on Earth; the fastest recorded dive attained a vertical speed of 390 km/h (240 mph).\u001b[0m\u001b[32;1m\u001b[1;3mThe scientific name for a hummingbird is Trochilidae. The fastest bird species is the peregrine falcon (Falco peregrinus), which can exceed speeds of 320 km/h (200 mph) in its dives.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"Total Tokens: 1675\n",
"Prompt Tokens: 1538\n",
"Completion Tokens: 137\n",
"Total Cost (USD): $0.0009745000000000001\n"
"Total Tokens: 1787\n",
"Prompt Tokens: 1687\n",
"Completion Tokens: 100\n",
"Total Cost (USD): $0.0009935\n"
]
}
],

View File

@@ -54,7 +54,7 @@
{
"cell_type": "code",
"execution_count": null,
"id": "2bb9c73f-9d00-4a19-a81f-cab2f0fd921a",
"id": "9e4144de-d925-4d4c-91c3-685ef8baa57c",
"metadata": {},
"outputs": [],
"source": [
@@ -63,7 +63,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 1,
"id": "a9e37aa1",
"metadata": {},
"outputs": [],
@@ -300,7 +300,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 8,
"id": "ac9295d3",
"metadata": {},
"outputs": [],
@@ -312,8 +312,10 @@
"\n",
"## Quick Install\n",
"\n",
"```bash\n",
"# Hopefully this code block isn't split\n",
"pip install langchain\n",
"```\n",
"\n",
"As an open-source project in a rapidly developing field, we are extremely open to contributions.\n",
"\"\"\""
@@ -321,7 +323,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 9,
"id": "3a0cb17a",
"metadata": {},
"outputs": [
@@ -330,14 +332,15 @@
"text/plain": [
"[Document(page_content='# 🦜️🔗 LangChain'),\n",
" Document(page_content='⚡ Building applications with LLMs through composability ⚡'),\n",
" Document(page_content='## Quick Install'),\n",
" Document(page_content='## Quick Install\\n\\n```bash'),\n",
" Document(page_content=\"# Hopefully this code block isn't split\"),\n",
" Document(page_content='pip install langchain'),\n",
" Document(page_content='```'),\n",
" Document(page_content='As an open-source project in a rapidly developing field, we'),\n",
" Document(page_content='are extremely open to contributions.')]"
]
},
"execution_count": 3,
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
@@ -718,44 +721,8 @@
"php_splitter = RecursiveCharacterTextSplitter.from_language(\n",
" language=Language.PHP, chunk_size=50, chunk_overlap=0\n",
")\n",
"php_docs = php_splitter.create_documents([PHP_CODE])\n",
"php_docs"
]
},
{
"cell_type": "markdown",
"id": "e9fa62c1",
"metadata": {},
"source": [
"## PowerShell\n",
"Here's an example using the PowerShell text splitter:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7e6893ad",
"metadata": {},
"outputs": [],
"source": [
"POWERSHELL_CODE = \"\"\"\n",
"$directoryPath = Get-Location\n",
"\n",
"$items = Get-ChildItem -Path $directoryPath\n",
"\n",
"$files = $items | Where-Object { -not $_.PSIsContainer }\n",
"\n",
"$sortedFiles = $files | Sort-Object LastWriteTime\n",
"\n",
"foreach ($file in $sortedFiles) {\n",
" Write-Output (\"Name: \" + $file.Name + \" | Last Write Time: \" + $file.LastWriteTime)\n",
"}\n",
"\"\"\"\n",
"powershell_splitter = RecursiveCharacterTextSplitter.from_language(\n",
" language=Language.POWERSHELL, chunk_size=100, chunk_overlap=0\n",
")\n",
"powershell_docs = powershell_splitter.create_documents([POWERSHELL_CODE])\n",
"powershell_docs"
"haskell_docs = php_splitter.create_documents([PHP_CODE])\n",
"haskell_docs"
]
}
],

View File

@@ -48,10 +48,20 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"id": "40ed76a2",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[33mWARNING: You are using pip version 22.0.4; however, version 24.0 is available.\n",
"You should consider upgrading via the '/Users/jacoblee/.pyenv/versions/3.10.5/bin/python -m pip install --upgrade pip' command.\u001b[0m\u001b[33m\n",
"\u001b[0mNote: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai\n",
"\n",
@@ -409,7 +419,7 @@
" # When configuring the end runnable, we can then use this id to configure this field\n",
" ConfigurableField(id=\"prompt\"),\n",
" # This sets a default_key.\n",
" # If we specify this key, the default prompt (asking for a joke, as initialized above) will be used\n",
" # If we specify this key, the default LLM (ChatAnthropic initialized above) will be used\n",
" default_key=\"joke\",\n",
" # This adds a new option, with name `poem`\n",
" poem=PromptTemplate.from_template(\"Write a short poem about {topic}\"),\n",
@@ -494,7 +504,7 @@
" # When configuring the end runnable, we can then use this id to configure this field\n",
" ConfigurableField(id=\"prompt\"),\n",
" # This sets a default_key.\n",
" # If we specify this key, the default prompt (asking for a joke, as initialized above) will be used\n",
" # If we specify this key, the default LLM (ChatAnthropic initialized above) will be used\n",
" default_key=\"joke\",\n",
" # This adds a new option, with name `poem`\n",
" poem=PromptTemplate.from_template(\"Write a short poem about {topic}\"),\n",

View File

@@ -220,57 +220,6 @@
"pretty_print_docs(compressed_docs)"
]
},
{
"cell_type": "markdown",
"id": "14002ec8-7ee5-4f91-9315-dd21c3808776",
"metadata": {},
"source": [
"### `LLMListwiseRerank`\n",
"\n",
"[LLMListwiseRerank](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.document_compressors.listwise_rerank.LLMListwiseRerank.html) uses [zero-shot listwise document reranking](https://arxiv.org/pdf/2305.02156) and functions similarly to `LLMChainFilter` as a robust but more expensive option. It is recommended to use a more powerful LLM.\n",
"\n",
"Note that `LLMListwiseRerank` requires a model with the [with_structured_output](/docs/integrations/chat/) method implemented."
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "4ab9ee9f-917e-4d6f-9344-eb7f01533228",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Document 1:\n",
"\n",
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while youre at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
"\n",
"Tonight, Id like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
"\n",
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
"\n",
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nations top legal minds, who will continue Justice Breyers legacy of excellence.\n"
]
}
],
"source": [
"from langchain.retrievers.document_compressors import LLMListwiseRerank\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
"\n",
"_filter = LLMListwiseRerank.from_llm(llm, top_n=1)\n",
"compression_retriever = ContextualCompressionRetriever(\n",
" base_compressor=_filter, base_retriever=retriever\n",
")\n",
"\n",
"compressed_docs = compression_retriever.invoke(\n",
" \"What did the president say about Ketanji Jackson Brown\"\n",
")\n",
"pretty_print_docs(compressed_docs)"
]
},
{
"cell_type": "markdown",
"id": "7194da42",
@@ -346,7 +295,7 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 7,
"id": "617a1756",
"metadata": {},
"outputs": [],

View File

@@ -1,549 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "9a8bceb3-95bd-4496-bb9e-57655136e070",
"metadata": {},
"source": [
"# How to convert Runnables as Tools\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Runnables](/docs/concepts#runnable-interface)\n",
"- [Tools](/docs/concepts#tools)\n",
"- [Agents](/docs/tutorials/agents)\n",
"\n",
":::\n",
"\n",
"Here we will demonstrate how to convert a LangChain `Runnable` into a tool that can be used by agents, chains, or chat models.\n",
"\n",
"## Dependencies\n",
"\n",
"**Note**: this guide requires `langchain-core` >= 0.2.13. We will also use [OpenAI](/docs/integrations/platforms/openai/) for embeddings, but any LangChain embeddings should suffice. We will use a simple [LangGraph](https://langchain-ai.github.io/langgraph/) agent for demonstration purposes."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "92341f48-2c29-4ce9-8ab8-0a7c7a7c98a1",
"metadata": {},
"outputs": [],
"source": [
"%%capture --no-stderr\n",
"%pip install -U langchain-core langchain-openai langgraph"
]
},
{
"cell_type": "markdown",
"id": "2b0dcc1a-48e8-4a81-b920-3563192ce076",
"metadata": {},
"source": [
"LangChain [tools](/docs/concepts#tools) are interfaces that an agent, chain, or chat model can use to interact with the world. See [here](/docs/how_to/#tools) for how-to guides covering tool-calling, built-in tools, custom tools, and more information.\n",
"\n",
"LangChain tools-- instances of [BaseTool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html)-- are [Runnables](/docs/concepts/#runnable-interface) with additional constraints that enable them to be invoked effectively by language models:\n",
"\n",
"- Their inputs are constrained to be serializable, specifically strings and Python `dict` objects;\n",
"- They contain names and descriptions indicating how and when they should be used;\n",
"- They may contain a detailed [args_schema](https://python.langchain.com/v0.2/docs/how_to/custom_tools/) for their arguments. That is, while a tool (as a `Runnable`) might accept a single `dict` input, the specific keys and type information needed to populate a dict should be specified in the `args_schema`.\n",
"\n",
"Runnables that accept string or `dict` input can be converted to tools using the [as_tool](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.as_tool) method, which allows for the specification of names, descriptions, and additional schema information for arguments."
]
},
{
"cell_type": "markdown",
"id": "b4d76680-1b6b-4862-8c4f-22766a1d41f2",
"metadata": {},
"source": [
"## Basic usage\n",
"\n",
"With typed `dict` input:"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "b2cc4231-64a3-4733-a284-932dcbf2fcc3",
"metadata": {},
"outputs": [],
"source": [
"from typing import List\n",
"\n",
"from langchain_core.runnables import RunnableLambda\n",
"from typing_extensions import TypedDict\n",
"\n",
"\n",
"class Args(TypedDict):\n",
" a: int\n",
" b: List[int]\n",
"\n",
"\n",
"def f(x: Args) -> str:\n",
" return str(x[\"a\"] * max(x[\"b\"]))\n",
"\n",
"\n",
"runnable = RunnableLambda(f)\n",
"as_tool = runnable.as_tool(\n",
" name=\"My tool\",\n",
" description=\"Explanation of when to use tool.\",\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "57f2d435-624d-459a-903d-8509fbbde610",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Explanation of when to use tool.\n"
]
},
{
"data": {
"text/plain": [
"{'title': 'My tool',\n",
" 'type': 'object',\n",
" 'properties': {'a': {'title': 'A', 'type': 'integer'},\n",
" 'b': {'title': 'B', 'type': 'array', 'items': {'type': 'integer'}}},\n",
" 'required': ['a', 'b']}"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"print(as_tool.description)\n",
"\n",
"as_tool.args_schema.schema()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "54ae7384-a03d-4fa4-8cdf-9604a4bc39ee",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'6'"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"as_tool.invoke({\"a\": 3, \"b\": [1, 2]})"
]
},
{
"cell_type": "markdown",
"id": "9038f587-4613-4f50-b349-135f9e7e3b15",
"metadata": {},
"source": [
"Without typing information, arg types can be specified via `arg_types`:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "169f733c-4936-497f-8577-ee769dc16b88",
"metadata": {},
"outputs": [],
"source": [
"from typing import Any, Dict\n",
"\n",
"\n",
"def g(x: Dict[str, Any]) -> str:\n",
" return str(x[\"a\"] * max(x[\"b\"]))\n",
"\n",
"\n",
"runnable = RunnableLambda(g)\n",
"as_tool = runnable.as_tool(\n",
" name=\"My tool\",\n",
" description=\"Explanation of when to use tool.\",\n",
" arg_types={\"a\": int, \"b\": List[int]},\n",
")"
]
},
{
"cell_type": "markdown",
"id": "32b1a992-8997-4c98-8eb2-c9fe9431b799",
"metadata": {},
"source": [
"Alternatively, the schema can be fully specified by directly passing the desired [args_schema](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html#langchain_core.tools.BaseTool.args_schema) for the tool:"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "eb102705-89b7-48dc-9158-d36d5f98ae8e",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"\n",
"\n",
"class GSchema(BaseModel):\n",
" \"\"\"Apply a function to an integer and list of integers.\"\"\"\n",
"\n",
" a: int = Field(..., description=\"Integer\")\n",
" b: List[int] = Field(..., description=\"List of ints\")\n",
"\n",
"\n",
"runnable = RunnableLambda(g)\n",
"as_tool = runnable.as_tool(GSchema)"
]
},
{
"cell_type": "markdown",
"id": "7c474d85-4e01-4fae-9bba-0c6c8c26475c",
"metadata": {},
"source": [
"String input is also supported:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "c475282a-58d6-4c2b-af7d-99b73b7d8a13",
"metadata": {},
"outputs": [],
"source": [
"def f(x: str) -> str:\n",
" return x + \"a\"\n",
"\n",
"\n",
"def g(x: str) -> str:\n",
" return x + \"z\"\n",
"\n",
"\n",
"runnable = RunnableLambda(f) | g\n",
"as_tool = runnable.as_tool()"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "ad6d8d96-3a87-40bd-a2ac-44a8acde0a8e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'baz'"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"as_tool.invoke(\"b\")"
]
},
{
"cell_type": "markdown",
"id": "89fdb3a7-d228-48f0-8f73-262af4febb58",
"metadata": {},
"source": [
"## In agents\n",
"\n",
"Below we will incorporate LangChain Runnables as tools in an [agent](/docs/concepts/#agents) application. We will demonstrate with:\n",
"\n",
"- a document [retriever](/docs/concepts/#retrievers);\n",
"- a simple [RAG](/docs/tutorials/rag/) chain, allowing an agent to delegate relevant queries to it.\n",
"\n",
"We first instantiate a chat model that supports [tool calling](/docs/how_to/tool_calling/):\n",
"\n",
"```{=mdx}\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
"<ChatModelTabs customVarName=\"llm\" />\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "d06c9f2a-4475-450f-9106-54db1d99623b",
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
]
},
{
"cell_type": "markdown",
"id": "e8a2038a-d762-4196-b5e3-fdb89c11e71d",
"metadata": {},
"source": [
"Following the [RAG tutorial](/docs/tutorials/rag/), let's first construct a retriever:"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "23d2a47e-6712-4294-81c8-2c1d76b4bb81",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.documents import Document\n",
"from langchain_core.vectorstores import InMemoryVectorStore\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"documents = [\n",
" Document(\n",
" page_content=\"Dogs are great companions, known for their loyalty and friendliness.\",\n",
" ),\n",
" Document(\n",
" page_content=\"Cats are independent pets that often enjoy their own space.\",\n",
" ),\n",
"]\n",
"\n",
"vectorstore = InMemoryVectorStore.from_documents(\n",
" documents, embedding=OpenAIEmbeddings()\n",
")\n",
"\n",
"retriever = vectorstore.as_retriever(\n",
" search_type=\"similarity\",\n",
" search_kwargs={\"k\": 1},\n",
")"
]
},
{
"cell_type": "markdown",
"id": "9ba737ac-43a2-4a6f-b855-5bd0305017f1",
"metadata": {},
"source": [
"We next create use a simple pre-built [LangGraph agent](https://python.langchain.com/v0.2/docs/tutorials/agents/) and provide it the tool:"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "c939cf2a-60e9-4afd-8b47-84d76ccb13f5",
"metadata": {},
"outputs": [],
"source": [
"from langgraph.prebuilt import create_react_agent\n",
"\n",
"tools = [\n",
" retriever.as_tool(\n",
" name=\"pet_info_retriever\",\n",
" description=\"Get information about pets.\",\n",
" )\n",
"]\n",
"agent = create_react_agent(llm, tools)"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "be29437b-a187-4a0a-9a5d-419c56f2434e",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_W8cnfOjwqEn4cFcg19LN9mYD', 'function': {'arguments': '{\"__arg1\":\"dogs\"}', 'name': 'pet_info_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 60, 'total_tokens': 79}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-d7f81de9-1fb7-4caf-81ed-16dcdb0b2ab4-0', tool_calls=[{'name': 'pet_info_retriever', 'args': {'__arg1': 'dogs'}, 'id': 'call_W8cnfOjwqEn4cFcg19LN9mYD'}], usage_metadata={'input_tokens': 60, 'output_tokens': 19, 'total_tokens': 79})]}}\n",
"----\n",
"{'tools': {'messages': [ToolMessage(content=\"[Document(id='86f835fe-4bbe-4ec6-aeb4-489a8b541707', page_content='Dogs are great companions, known for their loyalty and friendliness.')]\", name='pet_info_retriever', tool_call_id='call_W8cnfOjwqEn4cFcg19LN9mYD')]}}\n",
"----\n",
"{'agent': {'messages': [AIMessage(content='Dogs are known for being great companions, known for their loyalty and friendliness.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 134, 'total_tokens': 152}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-9ca5847a-a5eb-44c0-a774-84cc2c5bbc5b-0', usage_metadata={'input_tokens': 134, 'output_tokens': 18, 'total_tokens': 152})]}}\n",
"----\n"
]
}
],
"source": [
"for chunk in agent.stream({\"messages\": [(\"human\", \"What are dogs known for?\")]}):\n",
" print(chunk)\n",
" print(\"----\")"
]
},
{
"cell_type": "markdown",
"id": "96f2ac9c-36f4-4b7a-ae33-f517734c86aa",
"metadata": {},
"source": [
"See [LangSmith trace](https://smith.langchain.com/public/44e438e3-2faf-45bd-b397-5510fc145eb9/r) for the above run."
]
},
{
"cell_type": "markdown",
"id": "a722fd8a-b957-4ba7-b408-35596b76835f",
"metadata": {},
"source": [
"Going further, we can create a simple [RAG](/docs/tutorials/rag/) chain that takes an additional parameter-- here, the \"style\" of the answer."
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "bea518c9-c711-47c2-b8cc-dbd102f71f09",
"metadata": {},
"outputs": [],
"source": [
"from operator import itemgetter\n",
"\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"system_prompt = \"\"\"\n",
"You are an assistant for question-answering tasks.\n",
"Use the below context to answer the question. If\n",
"you don't know the answer, say you don't know.\n",
"Use three sentences maximum and keep the answer\n",
"concise.\n",
"\n",
"Answer in the style of {answer_style}.\n",
"\n",
"Question: {question}\n",
"\n",
"Context: {context}\n",
"\"\"\"\n",
"\n",
"prompt = ChatPromptTemplate.from_messages([(\"system\", system_prompt)])\n",
"\n",
"rag_chain = (\n",
" {\n",
" \"context\": itemgetter(\"question\") | retriever,\n",
" \"question\": itemgetter(\"question\"),\n",
" \"answer_style\": itemgetter(\"answer_style\"),\n",
" }\n",
" | prompt\n",
" | llm\n",
" | StrOutputParser()\n",
")"
]
},
{
"cell_type": "markdown",
"id": "955a23db-5218-4c34-8486-450a2ddb3443",
"metadata": {},
"source": [
"Note that the input schema for our chain contains the required arguments, so it converts to a tool without further specification:"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "2c9f6e61-80ed-4abb-8e77-84de3ccbc891",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'RunnableParallel<context,question,answer_style>Input',\n",
" 'type': 'object',\n",
" 'properties': {'question': {'title': 'Question'},\n",
" 'answer_style': {'title': 'Answer Style'}}}"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"rag_chain.input_schema.schema()"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "a3f9cf5b-8c71-4b0f-902b-f92e028780c9",
"metadata": {},
"outputs": [],
"source": [
"rag_tool = rag_chain.as_tool(\n",
" name=\"pet_expert\",\n",
" description=\"Get information about pets.\",\n",
")"
]
},
{
"cell_type": "markdown",
"id": "4570615b-8f96-4d97-ae01-1c08b14be584",
"metadata": {},
"source": [
"Below we again invoke the agent. Note that the agent populates the required parameters in its `tool_calls`:"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "06409913-a2ad-400f-a202-7b8dd2ef483a",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_17iLPWvOD23zqwd1QVQ00Y63', 'function': {'arguments': '{\"question\":\"What are dogs known for according to pirates?\",\"answer_style\":\"quote\"}', 'name': 'pet_expert'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 28, 'prompt_tokens': 59, 'total_tokens': 87}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-7fef44f3-7bba-4e63-8c51-2ad9c5e65e2e-0', tool_calls=[{'name': 'pet_expert', 'args': {'question': 'What are dogs known for according to pirates?', 'answer_style': 'quote'}, 'id': 'call_17iLPWvOD23zqwd1QVQ00Y63'}], usage_metadata={'input_tokens': 59, 'output_tokens': 28, 'total_tokens': 87})]}}\n",
"----\n",
"{'tools': {'messages': [ToolMessage(content='\"Dogs are known for their loyalty and friendliness, making them great companions for pirates on long sea voyages.\"', name='pet_expert', tool_call_id='call_17iLPWvOD23zqwd1QVQ00Y63')]}}\n",
"----\n",
"{'agent': {'messages': [AIMessage(content='According to pirates, dogs are known for their loyalty and friendliness, making them great companions for pirates on long sea voyages.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 119, 'total_tokens': 146}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5a30edc3-7be0-4743-b980-ca2f8cad9b8d-0', usage_metadata={'input_tokens': 119, 'output_tokens': 27, 'total_tokens': 146})]}}\n",
"----\n"
]
}
],
"source": [
"agent = create_react_agent(llm, [rag_tool])\n",
"\n",
"for chunk in agent.stream(\n",
" {\"messages\": [(\"human\", \"What would a pirate say dogs are known for?\")]}\n",
"):\n",
" print(chunk)\n",
" print(\"----\")"
]
},
{
"cell_type": "markdown",
"id": "96cc9bc3-e79e-49a8-9915-428ea225358b",
"metadata": {},
"source": [
"See [LangSmith trace](https://smith.langchain.com/public/147ae4e6-4dfb-4dd9-8ca0-5c5b954f08ac/r) for the above run."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -131,7 +131,7 @@
"source": [
"## Base Chat Model\n",
"\n",
"Let's implement a chat model that echoes back the first `n` characters of the last message in the prompt!\n",
"Let's implement a chat model that echoes back the first `n` characetrs of the last message in the prompt!\n",
"\n",
"To do so, we will inherit from `BaseChatModel` and we'll need to implement the following:\n",
"\n",

View File

@@ -5,7 +5,7 @@
"id": "5436020b",
"metadata": {},
"source": [
"# How to create tools\n",
"# How to create custom tools\n",
"\n",
"When constructing an agent, you will need to provide it with a list of `Tool`s that it can use. Besides the actual function that is called, the Tool consists of several components:\n",
"\n",
@@ -16,15 +16,13 @@
"| args_schema | Pydantic BaseModel | Optional but recommended, can be used to provide more information (e.g., few-shot examples) or validation for expected parameters |\n",
"| return_direct | boolean | Only relevant for agents. When True, after invoking the given tool, the agent will stop and return the result direcly to the user. |\n",
"\n",
"LangChain supports the creation of tools from:\n",
"LangChain provides 3 ways to create tools:\n",
"\n",
"1. Functions;\n",
"2. LangChain [Runnables](/docs/concepts#runnable-interface);\n",
"1. Using [@tool decorator](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html#langchain_core.tools.tool) -- the simplest way to define a custom tool.\n",
"2. Using [StructuredTool.from_function](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.StructuredTool.html#langchain_core.tools.StructuredTool.from_function) class method -- this is similar to the `@tool` decorator, but allows more configuration and specification of both sync and async implementations.\n",
"3. By sub-classing from [BaseTool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html) -- This is the most flexible method, it provides the largest degree of control, at the expense of more effort and code.\n",
"\n",
"Creating tools from functions may be sufficient for most use cases, and can be done via a simple [@tool decorator](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html#langchain_core.tools.tool). If more configuration is needed-- e.g., specification of both sync and async implementations-- one can also use the [StructuredTool.from_function](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.StructuredTool.html#langchain_core.tools.StructuredTool.from_function) class method.\n",
"\n",
"In this guide we provide an overview of these methods.\n",
"The `@tool` or the `StructuredTool.from_function` class method should be sufficient for most use cases.\n",
"\n",
":::{.callout-tip}\n",
"\n",
@@ -37,9 +35,7 @@
"id": "c7326b23",
"metadata": {},
"source": [
"## Creating tools from functions\n",
"\n",
"### @tool decorator\n",
"## @tool decorator\n",
"\n",
"This `@tool` decorator is the simplest way to define a custom tool. The decorator uses the function name as the tool name by default, but this can be overridden by passing a string as the first argument. Additionally, the decorator will use the function's docstring as the tool's description - so a docstring MUST be provided. "
]
@@ -55,7 +51,7 @@
"output_type": "stream",
"text": [
"multiply\n",
"Multiply two numbers.\n",
"multiply(a: int, b: int) -> int - Multiply two numbers.\n",
"{'a': {'title': 'A', 'type': 'integer'}, 'b': {'title': 'B', 'type': 'integer'}}\n"
]
}
@@ -100,57 +96,6 @@
" return a * b"
]
},
{
"cell_type": "markdown",
"id": "8f0edc51-c586-414c-8941-c8abe779943f",
"metadata": {},
"source": [
"Note that `@tool` supports parsing of annotations, nested schemas, and other features:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "5626423f-053e-4a66-adca-1d794d835397",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'multiply_by_maxSchema',\n",
" 'description': 'Multiply a by the maximum of b.',\n",
" 'type': 'object',\n",
" 'properties': {'a': {'title': 'A',\n",
" 'description': 'scale factor',\n",
" 'type': 'string'},\n",
" 'b': {'title': 'B',\n",
" 'description': 'list of ints over which to take maximum',\n",
" 'type': 'array',\n",
" 'items': {'type': 'integer'}}},\n",
" 'required': ['a', 'b']}"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from typing import Annotated, List\n",
"\n",
"\n",
"@tool\n",
"def multiply_by_max(\n",
" a: Annotated[str, \"scale factor\"],\n",
" b: Annotated[List[int], \"list of ints over which to take maximum\"],\n",
") -> int:\n",
" \"\"\"Multiply a by the maximum of b.\"\"\"\n",
" return a * max(b)\n",
"\n",
"\n",
"multiply_by_max.args_schema.schema()"
]
},
{
"cell_type": "markdown",
"id": "98d6eee9",
@@ -161,7 +106,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 3,
"id": "9216d03a-f6ea-4216-b7e1-0661823a4c0b",
"metadata": {},
"outputs": [
@@ -170,7 +115,7 @@
"output_type": "stream",
"text": [
"multiplication-tool\n",
"Multiply two numbers.\n",
"multiplication-tool(a: int, b: int) -> int - Multiply two numbers.\n",
"{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}\n",
"True\n"
]
@@ -198,84 +143,19 @@
"print(multiply.return_direct)"
]
},
{
"cell_type": "markdown",
"id": "33a9e94d-0b60-48f3-a4c2-247dce096e66",
"metadata": {},
"source": [
"#### Docstring parsing"
]
},
{
"cell_type": "markdown",
"id": "6d0cb586-93d4-4ff1-9779-71df7853cb68",
"metadata": {},
"source": [
"`@tool` can optionally parse [Google Style docstrings](https://google.github.io/styleguide/pyguide.html#383-functions-and-methods) and associate the docstring components (such as arg descriptions) to the relevant parts of the tool schema. To toggle this behavior, specify `parse_docstring`:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "336f5538-956e-47d5-9bde-b732559f9e61",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'fooSchema',\n",
" 'description': 'The foo.',\n",
" 'type': 'object',\n",
" 'properties': {'bar': {'title': 'Bar',\n",
" 'description': 'The bar.',\n",
" 'type': 'string'},\n",
" 'baz': {'title': 'Baz', 'description': 'The baz.', 'type': 'integer'}},\n",
" 'required': ['bar', 'baz']}"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"@tool(parse_docstring=True)\n",
"def foo(bar: str, baz: int) -> str:\n",
" \"\"\"The foo.\n",
"\n",
" Args:\n",
" bar: The bar.\n",
" baz: The baz.\n",
" \"\"\"\n",
" return bar\n",
"\n",
"\n",
"foo.args_schema.schema()"
]
},
{
"cell_type": "markdown",
"id": "f18a2503-5393-421b-99fa-4a01dd824d0e",
"metadata": {},
"source": [
":::{.callout-caution}\n",
"By default, `@tool(parse_docstring=True)` will raise `ValueError` if the docstring does not parse correctly. See [API Reference](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html) for detail and examples.\n",
":::"
]
},
{
"cell_type": "markdown",
"id": "b63fcc3b",
"metadata": {},
"source": [
"### StructuredTool\n",
"## StructuredTool\n",
"\n",
"The `StructuredTool.from_function` class method provides a bit more configurability than the `@tool` decorator, without requiring much additional code."
"The `StrurcturedTool.from_function` class method provides a bit more configurability than the `@tool` decorator, without requiring much additional code."
]
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 4,
"id": "564fbe6f-11df-402d-b135-ef6ff25e1e63",
"metadata": {},
"outputs": [
@@ -318,7 +198,7 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 5,
"id": "6bc055d4-1fbe-4db5-8881-9c382eba6b1b",
"metadata": {},
"outputs": [
@@ -328,7 +208,7 @@
"text": [
"6\n",
"Calculator\n",
"multiply numbers\n",
"Calculator(a: int, b: int) -> int - multiply numbers\n",
"{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}\n"
]
}
@@ -359,63 +239,6 @@
"print(calculator.args)"
]
},
{
"cell_type": "markdown",
"id": "5517995d-54e3-449b-8fdb-03561f5e4647",
"metadata": {},
"source": [
"## Creating tools from Runnables\n",
"\n",
"LangChain [Runnables](/docs/concepts#runnable-interface) that accept string or `dict` input can be converted to tools using the [as_tool](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.as_tool) method, which allows for the specification of names, descriptions, and additional schema information for arguments.\n",
"\n",
"Example usage:"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "8ef593c5-cf72-4c10-bfc9-7d21874a0c24",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'answer_style': {'title': 'Answer Style', 'type': 'string'}}"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.language_models import GenericFakeChatModel\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [(\"human\", \"Hello. Please respond in the style of {answer_style}.\")]\n",
")\n",
"\n",
"# Placeholder LLM\n",
"llm = GenericFakeChatModel(messages=iter([\"hello matey\"]))\n",
"\n",
"chain = prompt | llm | StrOutputParser()\n",
"\n",
"as_tool = chain.as_tool(\n",
" name=\"Style responder\", description=\"Description of when to use tool.\"\n",
")\n",
"as_tool.args"
]
},
{
"cell_type": "markdown",
"id": "0521b787-a146-45a6-8ace-ae1ac4669dd7",
"metadata": {},
"source": [
"See [this guide](/docs/how_to/convert_runnable_to_tool) for more detail."
]
},
{
"cell_type": "markdown",
"id": "b840074b-9c10-4ca0-aed8-626c52b2398f",
@@ -428,7 +251,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 16,
"id": "1dad8f8e",
"metadata": {},
"outputs": [],
@@ -477,7 +300,7 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 7,
"id": "bb551c33",
"metadata": {},
"outputs": [
@@ -528,7 +351,7 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 8,
"id": "6615cb77-fd4c-4676-8965-f92cc71d4944",
"metadata": {},
"outputs": [
@@ -560,7 +383,7 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 9,
"id": "bb2af583-eadd-41f4-a645-bf8748bd3dcd",
"metadata": {},
"outputs": [
@@ -605,7 +428,7 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 10,
"id": "4ad0932c-8610-4278-8c57-f9218f654c8a",
"metadata": {},
"outputs": [
@@ -650,7 +473,7 @@
},
{
"cell_type": "code",
"execution_count": 15,
"execution_count": 11,
"id": "7094c0e8-6192-4870-a942-aad5b5ae48fd",
"metadata": {},
"outputs": [],
@@ -673,7 +496,7 @@
},
{
"cell_type": "code",
"execution_count": 16,
"execution_count": 12,
"id": "b4d22022-b105-4ccc-a15b-412cb9ea3097",
"metadata": {},
"outputs": [
@@ -683,7 +506,7 @@
"'Error: There is no city by the name of foobar.'"
]
},
"execution_count": 16,
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
@@ -707,7 +530,7 @@
},
{
"cell_type": "code",
"execution_count": 17,
"execution_count": 13,
"id": "3fad1728-d367-4e1b-9b54-3172981271cf",
"metadata": {},
"outputs": [
@@ -717,7 +540,7 @@
"\"There is no such city, but it's probably above 0K there!\""
]
},
"execution_count": 17,
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
@@ -741,7 +564,7 @@
},
{
"cell_type": "code",
"execution_count": 18,
"execution_count": 14,
"id": "ebfe7c1f-318d-4e58-99e1-f31e69473c46",
"metadata": {},
"outputs": [
@@ -751,7 +574,7 @@
"'The following errors occurred during tool execution: `Error: There is no city by the name of foobar.`'"
]
},
"execution_count": 18,
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
@@ -768,189 +591,13 @@
"\n",
"get_weather_tool.invoke({\"city\": \"foobar\"})"
]
},
{
"cell_type": "markdown",
"id": "1a8d8383-11b3-445e-956f-df4e96995e00",
"metadata": {},
"source": [
"## Returning artifacts of Tool execution\n",
"\n",
"Sometimes there are artifacts of a tool's execution that we want to make accessible to downstream components in our chain or agent, but that we don't want to expose to the model itself. For example if a tool returns custom objects like Documents, we may want to pass some view or metadata about this output to the model without passing the raw output to the model. At the same time, we may want to be able to access this full output elsewhere, for example in downstream tools.\n",
"\n",
"The Tool and [ToolMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.tool.ToolMessage.html) interfaces make it possible to distinguish between the parts of the tool output meant for the model (this is the ToolMessage.content) and those parts which are meant for use outside the model (ToolMessage.artifact).\n",
"\n",
":::info Requires ``langchain-core >= 0.2.19``\n",
"\n",
"This functionality was added in ``langchain-core == 0.2.19``. Please make sure your package is up to date.\n",
"\n",
":::\n",
"\n",
"If we want our tool to distinguish between message content and other artifacts, we need to specify `response_format=\"content_and_artifact\"` when defining our tool and make sure that we return a tuple of (content, artifact):"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "14905425-0334-43a0-9de9-5bcf622ede0e",
"metadata": {},
"outputs": [],
"source": [
"import random\n",
"from typing import List, Tuple\n",
"\n",
"from langchain_core.tools import tool\n",
"\n",
"\n",
"@tool(response_format=\"content_and_artifact\")\n",
"def generate_random_ints(min: int, max: int, size: int) -> Tuple[str, List[int]]:\n",
" \"\"\"Generate size random ints in the range [min, max].\"\"\"\n",
" array = [random.randint(min, max) for _ in range(size)]\n",
" content = f\"Successfully generated array of {size} random ints in [{min}, {max}].\"\n",
" return content, array"
]
},
{
"cell_type": "markdown",
"id": "49f057a6-8938-43ea-8faf-ae41e797ceb8",
"metadata": {},
"source": [
"If we invoke our tool directly with the tool arguments, we'll get back just the content part of the output:"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "0f2e1528-404b-46e6-b87c-f0957c4b9217",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Successfully generated array of 10 random ints in [0, 9].'"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"generate_random_ints.invoke({\"min\": 0, \"max\": 9, \"size\": 10})"
]
},
{
"cell_type": "markdown",
"id": "1e62ebba-1737-4b97-b61a-7313ade4e8c2",
"metadata": {},
"source": [
"If we invoke our tool with a ToolCall (like the ones generated by tool-calling models), we'll get back a ToolMessage that contains both the content and artifact generated by the Tool:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "cc197777-26eb-46b3-a83b-c2ce116c6311",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"ToolMessage(content='Successfully generated array of 10 random ints in [0, 9].', name='generate_random_ints', tool_call_id='123', artifact=[1, 4, 2, 5, 3, 9, 0, 4, 7, 7])"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"generate_random_ints.invoke(\n",
" {\n",
" \"name\": \"generate_random_ints\",\n",
" \"args\": {\"min\": 0, \"max\": 9, \"size\": 10},\n",
" \"id\": \"123\", # required\n",
" \"type\": \"tool_call\", # required\n",
" }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "dfdc1040-bf25-4790-b4c3-59452db84e11",
"metadata": {},
"source": [
"We can do the same when subclassing BaseTool:"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "fe1a09d1-378b-4b91-bb5e-0697c3d7eb92",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.tools import BaseTool\n",
"\n",
"\n",
"class GenerateRandomFloats(BaseTool):\n",
" name: str = \"generate_random_floats\"\n",
" description: str = \"Generate size random floats in the range [min, max].\"\n",
" response_format: str = \"content_and_artifact\"\n",
"\n",
" ndigits: int = 2\n",
"\n",
" def _run(self, min: float, max: float, size: int) -> Tuple[str, List[float]]:\n",
" range_ = max - min\n",
" array = [\n",
" round(min + (range_ * random.random()), ndigits=self.ndigits)\n",
" for _ in range(size)\n",
" ]\n",
" content = f\"Generated {size} floats in [{min}, {max}], rounded to {self.ndigits} decimals.\"\n",
" return content, array\n",
"\n",
" # Optionally define an equivalent async method\n",
"\n",
" # async def _arun(self, min: float, max: float, size: int) -> Tuple[str, List[float]]:\n",
" # ..."
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "8c3d16f6-1c4a-48ab-b05a-38547c592e79",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"ToolMessage(content='Generated 3 floats in [0.1, 3.3333], rounded to 4 decimals.', name='generate_random_floats', tool_call_id='123', artifact=[1.4277, 0.7578, 2.4871])"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"rand_gen = GenerateRandomFloats(ndigits=4)\n",
"\n",
"rand_gen.invoke(\n",
" {\n",
" \"name\": \"generate_random_floats\",\n",
" \"args\": {\"min\": 0.1, \"max\": 3.3333, \"size\": 3},\n",
" \"id\": \"123\",\n",
" \"type\": \"tool_call\",\n",
" }\n",
")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "poetry-venv-311",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "poetry-venv-311"
"name": "python3"
},
"language_info": {
"codemirror_mode": {
@@ -962,7 +609,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
"version": "3.11.4"
},
"vscode": {
"interpreter": {

View File

@@ -63,7 +63,7 @@
"* The `load` methods is a convenience method meant solely for prototyping work -- it just invokes `list(self.lazy_load())`.\n",
"* The `alazy_load` has a default implementation that will delegate to `lazy_load`. If you're using async, we recommend overriding the default implementation and providing a native async implementation.\n",
"\n",
":::{.callout-important}\n",
"::: {.callout-important}\n",
"When implementing a document loader do **NOT** provide parameters via the `lazy_load` or `alazy_load` methods.\n",
"\n",
"All configuration is expected to be passed through the initializer (__init__). This was a design choice made by LangChain to make sure that once a document loader has been instantiated it has all the information needed to load documents.\n",
@@ -235,7 +235,7 @@
"id": "56cb443e-f987-4386-b4ec-975ee129adb2",
"metadata": {},
"source": [
":::{.callout-tip}\n",
"::: {.callout-tip}\n",
"\n",
"`load()` can be helpful in an interactive environment such as a jupyter notebook.\n",
"\n",
@@ -276,7 +276,7 @@
"source": [
"## Working with Files\n",
"\n",
"Many document loaders involve parsing files. The difference between such loaders usually stems from how the file is parsed, rather than how the file is loaded. For example, you can use `open` to read the binary content of either a PDF or a markdown file, but you need different parsing logic to convert that binary data into text.\n",
"Many document loaders invovle parsing files. The difference between such loaders usually stems from how the file is parsed rather than how the file is loaded. For example, you can use `open` to read the binary content of either a PDF or a markdown file, but you need different parsing logic to convert that binary data into text.\n",
"\n",
"As a result, it can be helpful to decouple the parsing logic from the loading logic, which makes it easier to re-use a given parser regardless of how the data was loaded.\n",
"\n",
@@ -355,7 +355,7 @@
"id": "433bfb7c-7767-43bc-b71e-42413d7494a8",
"metadata": {},
"source": [
"Using the **blob** API also allows one to load content directly from memory without having to read it from a file!"
"Using the **blob** API also allows one to load content direclty from memory without having to read it from a file!"
]
},
{
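A minimal sketch of the custom document loader pattern described above: all configuration goes through `__init__`, and `lazy_load` yields documents one at a time. The line-per-document format is an assumption for illustration:

from typing import Iterator

from langchain_core.document_loaders import BaseLoader
from langchain_core.documents import Document


class LineByLineLoader(BaseLoader):
    """Load a text file line by line, emitting one Document per line."""

    def __init__(self, file_path: str) -> None:
        # All configuration is passed through the initializer.
        self.file_path = file_path

    def lazy_load(self) -> Iterator[Document]:
        # Yield documents lazily so large files never need to fit in memory at once.
        with open(self.file_path, encoding="utf-8") as f:
            for line_number, line in enumerate(f):
                yield Document(
                    page_content=line,
                    metadata={"line_number": line_number, "source": self.file_path},
                )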

Some files were not shown because too many files have changed in this diff.