Compare commits

..

1 Commit

Author: William Fu-Hinthorn
SHA1: 5f632f8c11
Message: [Core] Expand typing for RunnableLike
Include callables whose second argument is a RunnableConfig
Date: 2024-06-26 12:49:05 -07:00
1455 changed files with 15470 additions and 36065 deletions
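A minimal sketch of the kind of callable this commit admits as a RunnableLike (coercion through RunnableLambda is shown purely for illustration; the function below is hypothetical):

```python
from langchain_core.runnables import RunnableConfig, RunnableLambda

def add_one(x: int, config: RunnableConfig) -> int:
    # A callable whose second argument is a RunnableConfig: the config
    # (callbacks, tags, run metadata) is passed through on invocation.
    return x + 1

runnable = RunnableLambda(add_one)
print(runnable.invoke(1))  # 2
```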

View File

@@ -4,6 +4,9 @@ contact_links:
- name: 🤔 Question or Problem
about: Ask a question or ask about a problem in GitHub Discussions.
url: https://www.github.com/langchain-ai/langchain/discussions/categories/q-a
- name: Discord
url: https://discord.gg/6adMQxSpJS
about: General community discussions
- name: Feature Request
url: https://www.github.com/langchain-ai/langchain/discussions/categories/ideas
about: Suggest a feature or an idea

View File

@@ -350,7 +350,11 @@ def get_graphql_pr_edges(*, settings: Settings, after: Union[str, None] = None):
print("Querying PRs...")
else:
print(f"Querying PRs with cursor {after}...")
data = get_graphql_response(settings=settings, query=prs_query, after=after)
data = get_graphql_response(
settings=settings,
query=prs_query,
after=after
)
graphql_response = PRsResponse.model_validate(data)
return graphql_response.data.repository.pullRequests.edges
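The helper above pages through pull requests with a GraphQL cursor. A schematic of how such a loop is typically driven (the `cursor` attribute and the loop shape are assumptions; this exact loop is not shown in the diff):

```python
def iter_all_pr_edges(settings):
    # Schematic cursor pagination over get_graphql_pr_edges from the script above.
    after = None
    while True:
        edges = get_graphql_pr_edges(settings=settings, after=after)
        if not edges:
            break
        yield from edges
        after = edges[-1].cursor  # GraphQL connection edges carry a cursor for the next page
```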
@@ -480,16 +484,10 @@ def get_contributors(settings: Settings):
lines_changed = pr.additions + pr.deletions
score = _logistic(files_changed, 20) + _logistic(lines_changed, 100)
contributor_scores[pr.author.login] += score
three_months_ago = datetime.now(timezone.utc) - timedelta(days=3 * 30)
three_months_ago = (datetime.now(timezone.utc) - timedelta(days=3*30))
if pr.createdAt > three_months_ago:
recent_contributor_scores[pr.author.login] += score
return (
contributors,
contributor_scores,
recent_contributor_scores,
reviewers,
authors,
)
return contributors, contributor_scores, recent_contributor_scores, reviewers, authors
def get_top_users(
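The score above combines file and line counts through a saturating curve so that very large PRs do not dominate. A minimal sketch of what such a `_logistic` helper could look like (its real implementation is not part of this diff, so the exact shape here is an assumption):

```python
import math

def _logistic(x: float, midpoint: float) -> float:
    # Assumed shape: close to 0 for small x, approaching 1 once x passes `midpoint`.
    return 1 / (1 + math.exp(-(x - midpoint) / midpoint))

files_changed, lines_changed = 5, 500
score = _logistic(files_changed, 20) + _logistic(lines_changed, 100)
print(round(score, 3))
```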
@@ -526,13 +524,9 @@ if __name__ == "__main__":
# question_commentors, question_last_month_commentors, question_authors = get_experts(
# settings=settings
# )
(
contributors,
contributor_scores,
recent_contributor_scores,
reviewers,
pr_authors,
) = get_contributors(settings=settings)
contributors, contributor_scores, recent_contributor_scores, reviewers, pr_authors = get_contributors(
settings=settings
)
# authors = {**question_authors, **pr_authors}
authors = {**pr_authors}
maintainers_logins = {
@@ -553,7 +547,6 @@ if __name__ == "__main__":
"obi1kenobi",
"langchain-infra",
"jacoblee93",
"isahers1",
"dqbd",
"bracesproul",
"akira",
@@ -565,7 +558,7 @@ if __name__ == "__main__":
maintainers.append(
{
"login": login,
"count": contributors[login], # + question_commentors[login],
"count": contributors[login], #+ question_commentors[login],
"avatarUrl": user.avatarUrl,
"twitterUsername": user.twitterUsername,
"url": user.url,
@@ -621,7 +614,9 @@ if __name__ == "__main__":
new_people_content = yaml.dump(
people, sort_keys=False, width=200, allow_unicode=True
)
if people_old_content == new_people_content:
if (
people_old_content == new_people_content
):
logging.info("The LangChain People data hasn't changed, finishing.")
sys.exit(0)
people_path.write_text(new_people_content, encoding="utf-8")
@@ -634,7 +629,9 @@ if __name__ == "__main__":
logging.info(f"Creating a new branch {branch_name}")
subprocess.run(["git", "checkout", "-B", branch_name], check=True)
logging.info("Adding updated file")
subprocess.run(["git", "add", str(people_path)], check=True)
subprocess.run(
["git", "add", str(people_path)], check=True
)
logging.info("Committing updated file")
message = "👥 Update LangChain people data"
result = subprocess.run(["git", "commit", "-m", message], check=True)
@@ -643,4 +640,4 @@ if __name__ == "__main__":
logging.info("Creating PR")
pr = repo.create_pull(title=message, body=message, base="master", head=branch_name)
logging.info(f"Created PR: {pr.number}")
logging.info("Finished")
logging.info("Finished")

View File

@@ -1,12 +1,11 @@
import glob
import json
import os
import re
import sys
import tomllib
from collections import defaultdict
import os
from typing import Dict, List, Set
import tomllib
from collections import defaultdict
import glob
LANGCHAIN_DIRS = [
"libs/core",
@@ -16,13 +15,8 @@ LANGCHAIN_DIRS = [
"libs/experimental",
]
def all_package_dirs() -> Set[str]:
return {
"/".join(path.split("/")[:-1]).lstrip("./")
for path in glob.glob("./libs/**/pyproject.toml", recursive=True)
if "libs/cli" not in path and "libs/standard-tests" not in path
}
return {"/".join(path.split("/")[:-1]) for path in glob.glob("./libs/**/pyproject.toml", recursive=True)}
def dependents_graph() -> dict:
@@ -32,9 +26,9 @@ def dependents_graph() -> dict:
if "template" in path:
continue
with open(path, "rb") as f:
pyproject = tomllib.load(f)["tool"]["poetry"]
pyproject = tomllib.load(f)['tool']['poetry']
pkg_dir = "libs" + "/".join(path.split("libs")[1].split("/")[:-1])
for dep in pyproject["dependencies"]:
for dep in pyproject['dependencies']:
if "langchain" in dep:
dependents[dep].add(pkg_dir)
return dependents
@@ -53,44 +47,6 @@ def add_dependents(dirs_to_eval: Set[str], dependents: dict) -> List[str]:
return list(updated)
def _get_configs_for_single_dir(job: str, dir_: str) -> List[Dict[str, str]]:
min_python = "3.8"
max_python = "3.12"
# custom logic for specific directories
if dir_ == "libs/partners/milvus":
# milvus poetry doesn't allow 3.12 because they
# declare deps in funny way
max_python = "3.11"
return [
{"working-directory": dir_, "python-version": min_python},
{"working-directory": dir_, "python-version": max_python},
]
def _get_configs_for_multi_dirs(
job: str, dirs_to_run: List[str], dependents: dict
) -> List[Dict[str, str]]:
if job == "lint":
dirs = add_dependents(
dirs_to_run["lint"] | dirs_to_run["test"] | dirs_to_run["extended-test"],
dependents,
)
elif job in ["test", "compile-integration-tests", "dependencies"]:
dirs = add_dependents(
dirs_to_run["test"] | dirs_to_run["extended-test"], dependents
)
elif job == "extended-tests":
dirs = list(dirs_to_run["extended-test"])
else:
raise ValueError(f"Unknown job: {job}")
return [
config for dir_ in dirs for config in _get_configs_for_single_dir(job, dir_)
]
if __name__ == "__main__":
files = sys.argv[1:]
@@ -164,23 +120,14 @@ if __name__ == "__main__":
dependents = dependents_graph()
# we now have dirs_by_job
# todo: clean this up
map_job_to_configs = {
job: _get_configs_for_multi_dirs(job, dirs_to_run, dependents)
for job in [
"lint",
"test",
"extended-tests",
"compile-integration-tests",
"dependencies",
]
outputs = {
"dirs-to-lint": add_dependents(
dirs_to_run["lint"] | dirs_to_run["test"] | dirs_to_run["extended-test"], dependents
),
"dirs-to-test": add_dependents(dirs_to_run["test"] | dirs_to_run["extended-test"], dependents),
"dirs-to-extended-test": list(dirs_to_run["extended-test"]),
"docs-edited": "true" if docs_edited else "",
}
map_job_to_configs["test-doc-imports"] = (
[{"python-version": "3.12"}] if docs_edited else []
)
for key, value in map_job_to_configs.items():
for key, value in outputs.items():
json_output = json.dumps(value)
print(f"{key}={json_output}")

View File

@@ -9,7 +9,6 @@ MIN_VERSION_LIBS = [
"langchain-community",
"langchain",
"langchain-text-splitters",
"SQLAlchemy",
]
@@ -75,4 +74,6 @@ if __name__ == "__main__":
# Call the function to get the minimum versions
min_versions = get_min_version_from_toml(toml_file)
print(" ".join([f"{lib}=={version}" for lib, version in min_versions.items()]))
print(
" ".join([f"{lib}=={version}" for lib, version in min_versions.items()])
)

View File

@@ -7,10 +7,6 @@ on:
required: true
type: string
description: "From which folder this pipeline executes"
python-version:
required: true
type: string
description: "Python version to use"
env:
POETRY_VERSION: "1.7.1"
@@ -21,14 +17,22 @@ jobs:
run:
working-directory: ${{ inputs.working-directory }}
runs-on: ubuntu-latest
name: "poetry run pytest -m compile tests/integration_tests #${{ inputs.python-version }}"
strategy:
matrix:
python-version:
- "3.8"
- "3.9"
- "3.10"
- "3.11"
- "3.12"
name: "poetry run pytest -m compile tests/integration_tests #${{ matrix.python-version }}"
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ inputs.python-version }}
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ inputs.working-directory }}
cache-key: compile-integration

View File

@@ -11,10 +11,6 @@ on:
required: false
type: string
description: "Relative path to the langchain library folder"
python-version:
required: true
type: string
description: "Python version to use"
env:
POETRY_VERSION: "1.7.1"
@@ -25,14 +21,22 @@ jobs:
run:
working-directory: ${{ inputs.working-directory }}
runs-on: ubuntu-latest
name: dependency checks ${{ inputs.python-version }}
strategy:
matrix:
python-version:
- "3.8"
- "3.9"
- "3.10"
- "3.11"
- "3.12"
name: dependency checks ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ inputs.python-version }}
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ inputs.working-directory }}
cache-key: pydantic-cross-compat

View File

@@ -6,10 +6,6 @@ on:
working-directory:
required: true
type: string
python-version:
required: true
type: string
description: "Python version to use"
env:
POETRY_VERSION: "1.7.1"
@@ -20,14 +16,19 @@ jobs:
run:
working-directory: ${{ inputs.working-directory }}
runs-on: ubuntu-latest
name: Python ${{ inputs.python-version }}
strategy:
matrix:
python-version:
- "3.8"
- "3.11"
name: Python ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ inputs.python-version }}
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ inputs.working-directory }}
cache-key: core

View File

@@ -11,10 +11,6 @@ on:
required: false
type: string
description: "Relative path to the langchain library folder"
python-version:
required: true
type: string
description: "Python version to use"
env:
POETRY_VERSION: "1.7.1"
@@ -25,15 +21,27 @@ env:
jobs:
build:
name: "make lint #${{ inputs.python-version }}"
name: "make lint #${{ matrix.python-version }}"
runs-on: ubuntu-latest
strategy:
matrix:
# Only lint on the min and max supported Python versions.
# It's extremely unlikely that there's a lint issue on any version in between
# that doesn't show up on the min or max versions.
#
# GitHub rate-limits how many jobs can be running at any one time.
# Starting new jobs is also relatively slow,
# so linting on fewer versions makes CI faster.
python-version:
- "3.8"
- "3.12"
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ inputs.python-version }}
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ inputs.working-directory }}
cache-key: lint-with-extras
@@ -78,7 +86,7 @@ jobs:
with:
path: |
${{ env.WORKDIR }}/.mypy_cache
key: mypy-lint-${{ runner.os }}-${{ runner.arch }}-py${{ inputs.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', inputs.working-directory)) }}
key: mypy-lint-${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', inputs.working-directory)) }}
- name: Analysing the code with our lint
@@ -112,7 +120,7 @@ jobs:
with:
path: |
${{ env.WORKDIR }}/.mypy_cache_test
key: mypy-test-${{ runner.os }}-${{ runner.arch }}-py${{ inputs.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', inputs.working-directory)) }}
key: mypy-test-${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', inputs.working-directory)) }}
- name: Analysing the code with our lint
working-directory: ${{ inputs.working-directory }}

View File

@@ -122,6 +122,7 @@ jobs:
fi
{
echo 'release-body<<EOF'
echo "# Release $TAG"
echo $PREAMBLE
echo
git log --format="%s" "$PREV_TAG"..HEAD -- $WORKING_DIR
@@ -134,7 +135,6 @@ jobs:
- release-notes
uses:
./.github/workflows/_test_release.yml
permissions: write-all
with:
working-directory: ${{ inputs.working-directory }}
dangerous-nonmaster-release: ${{ inputs.dangerous-nonmaster-release }}

View File

@@ -11,10 +11,6 @@ on:
required: false
type: string
description: "Relative path to the langchain library folder"
python-version:
required: true
type: string
description: "Python version to use"
env:
POETRY_VERSION: "1.7.1"
@@ -25,14 +21,22 @@ jobs:
run:
working-directory: ${{ inputs.working-directory }}
runs-on: ubuntu-latest
name: "make test #${{ inputs.python-version }}"
strategy:
matrix:
python-version:
- "3.8"
- "3.9"
- "3.10"
- "3.11"
- "3.12"
name: "make test #${{ matrix.python-version }}"
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ inputs.python-version }}
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ inputs.working-directory }}
cache-key: core

View File

@@ -2,11 +2,6 @@ name: test_doc_imports
on:
workflow_call:
inputs:
python-version:
required: true
type: string
description: "Python version to use"
env:
POETRY_VERSION: "1.7.1"
@@ -14,14 +9,18 @@ env:
jobs:
build:
runs-on: ubuntu-latest
name: "check doc imports #${{ inputs.python-version }}"
strategy:
matrix:
python-version:
- "3.12"
name: "check doc imports #${{ matrix.python-version }}"
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ inputs.python-version }}
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
cache-key: core

View File

@@ -33,96 +33,91 @@ jobs:
run: |
python .github/scripts/check_diff.py ${{ steps.files.outputs.all }} >> $GITHUB_OUTPUT
outputs:
lint: ${{ steps.set-matrix.outputs.lint }}
test: ${{ steps.set-matrix.outputs.test }}
extended-tests: ${{ steps.set-matrix.outputs.extended-tests }}
compile-integration-tests: ${{ steps.set-matrix.outputs.compile-integration-tests }}
dependencies: ${{ steps.set-matrix.outputs.dependencies }}
test-doc-imports: ${{ steps.set-matrix.outputs.test-doc-imports }}
dirs-to-lint: ${{ steps.set-matrix.outputs.dirs-to-lint }}
dirs-to-test: ${{ steps.set-matrix.outputs.dirs-to-test }}
dirs-to-extended-test: ${{ steps.set-matrix.outputs.dirs-to-extended-test }}
docs-edited: ${{ steps.set-matrix.outputs.docs-edited }}
lint:
name: cd ${{ matrix.job-configs.working-directory }}
name: cd ${{ matrix.working-directory }}
needs: [ build ]
if: ${{ needs.build.outputs.lint != '[]' }}
if: ${{ needs.build.outputs.dirs-to-lint != '[]' }}
strategy:
matrix:
job-configs: ${{ fromJson(needs.build.outputs.lint) }}
working-directory: ${{ fromJson(needs.build.outputs.dirs-to-lint) }}
uses: ./.github/workflows/_lint.yml
with:
working-directory: ${{ matrix.job-configs.working-directory }}
python-version: ${{ matrix.job-configs.python-version }}
working-directory: ${{ matrix.working-directory }}
secrets: inherit
test:
name: cd ${{ matrix.job-configs.working-directory }}
name: cd ${{ matrix.working-directory }}
needs: [ build ]
if: ${{ needs.build.outputs.test != '[]' }}
if: ${{ needs.build.outputs.dirs-to-test != '[]' }}
strategy:
matrix:
job-configs: ${{ fromJson(needs.build.outputs.test) }}
working-directory: ${{ fromJson(needs.build.outputs.dirs-to-test) }}
uses: ./.github/workflows/_test.yml
with:
working-directory: ${{ matrix.job-configs.working-directory }}
python-version: ${{ matrix.job-configs.python-version }}
working-directory: ${{ matrix.working-directory }}
secrets: inherit
test-doc-imports:
needs: [ build ]
if: ${{ needs.build.outputs.test-doc-imports != '[]' }}
strategy:
matrix:
job-configs: ${{ fromJson(needs.build.outputs.test-doc-imports) }}
if: ${{ needs.build.outputs.dirs-to-test != '[]' || needs.build.outputs.docs-edited }}
uses: ./.github/workflows/_test_doc_imports.yml
secrets: inherit
with:
python-version: ${{ matrix.job-configs.python-version }}
compile-integration-tests:
name: cd ${{ matrix.job-configs.working-directory }}
name: cd ${{ matrix.working-directory }}
needs: [ build ]
if: ${{ needs.build.outputs.compile-integration-tests != '[]' }}
if: ${{ needs.build.outputs.dirs-to-test != '[]' }}
strategy:
matrix:
job-configs: ${{ fromJson(needs.build.outputs.compile-integration-tests) }}
working-directory: ${{ fromJson(needs.build.outputs.dirs-to-test) }}
uses: ./.github/workflows/_compile_integration_test.yml
with:
working-directory: ${{ matrix.job-configs.working-directory }}
python-version: ${{ matrix.job-configs.python-version }}
working-directory: ${{ matrix.working-directory }}
secrets: inherit
dependencies:
name: cd ${{ matrix.job-configs.working-directory }}
name: cd ${{ matrix.working-directory }}
needs: [ build ]
if: ${{ needs.build.outputs.dependencies != '[]' }}
if: ${{ needs.build.outputs.dirs-to-test != '[]' }}
strategy:
matrix:
job-configs: ${{ fromJson(needs.build.outputs.dependencies) }}
working-directory: ${{ fromJson(needs.build.outputs.dirs-to-test) }}
uses: ./.github/workflows/_dependencies.yml
with:
working-directory: ${{ matrix.job-configs.working-directory }}
python-version: ${{ matrix.job-configs.python-version }}
working-directory: ${{ matrix.working-directory }}
secrets: inherit
extended-tests:
name: "cd ${{ matrix.job-configs.working-directory }} / make extended_tests #${{ matrix.job-configs.python-version }}"
name: "cd ${{ matrix.working-directory }} / make extended_tests #${{ matrix.python-version }}"
needs: [ build ]
if: ${{ needs.build.outputs.extended-tests != '[]' }}
if: ${{ needs.build.outputs.dirs-to-extended-test != '[]' }}
strategy:
matrix:
# note different variable for extended test dirs
job-configs: ${{ fromJson(needs.build.outputs.extended-tests) }}
working-directory: ${{ fromJson(needs.build.outputs.dirs-to-extended-test) }}
python-version:
- "3.8"
- "3.9"
- "3.10"
- "3.11"
- "3.12"
runs-on: ubuntu-latest
defaults:
run:
working-directory: ${{ matrix.job-configs.working-directory }}
working-directory: ${{ matrix.working-directory }}
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.job-configs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ matrix.job-configs.python-version }}
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ matrix.job-configs.working-directory }}
working-directory: ${{ matrix.working-directory }}
cache-key: extended
- name: Install dependencies

View File

@@ -26,11 +26,6 @@ jobs:
python-version: '3.10'
- id: files
uses: Ana06/get-changed-files@v2.2.0
with:
filter: |
*.ipynb
*.md
*.mdx
- name: Check new docs
run: |
python docs/scripts/check_templates.py ${{ steps.files.outputs.added }}

View File

@@ -16,7 +16,6 @@ jobs:
langchain-people:
if: github.repository_owner == 'langchain-ai'
runs-on: ubuntu-latest
permissions: write-all
steps:
- name: Dump GitHub context
env:

View File

@@ -27,6 +27,7 @@ jobs:
- "libs/partners/groq"
- "libs/partners/mistralai"
- "libs/partners/together"
- "libs/partners/cohere"
- "libs/partners/google-vertexai"
- "libs/partners/google-genai"
- "libs/partners/aws"
@@ -39,6 +40,10 @@ jobs:
with:
repository: langchain-ai/langchain-google
path: langchain-google
- uses: actions/checkout@v4
with:
repository: langchain-ai/langchain-cohere
path: langchain-cohere
- uses: actions/checkout@v4
with:
repository: langchain-ai/langchain-aws
@@ -48,9 +53,11 @@ jobs:
run: |
rm -rf \
langchain/libs/partners/google-genai \
langchain/libs/partners/google-vertexai
langchain/libs/partners/google-vertexai \
langchain/libs/partners/cohere
mv langchain-google/libs/genai langchain/libs/partners/google-genai
mv langchain-google/libs/vertexai langchain/libs/partners/google-vertexai
mv langchain-cohere/libs/cohere langchain/libs/partners/cohere
mv langchain-aws/libs/aws langchain/libs/partners/aws
- name: Set up Python ${{ matrix.python-version }}
@@ -109,6 +116,7 @@ jobs:
rm -rf \
langchain/libs/partners/google-genai \
langchain/libs/partners/google-vertexai \
langchain/libs/partners/cohere \
langchain/libs/partners/aws
- name: Ensure the tests did not create any additional files

View File

@@ -11,6 +11,7 @@
[![Open Issues](https://img.shields.io/github/issues-raw/langchain-ai/langchain?style=flat-square)](https://github.com/langchain-ai/langchain/issues)
[![Open in Dev Containers](https://img.shields.io/static/v1?label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode&style=flat-square)](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchain)
[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/langchain-ai/langchain)
[![](https://dcbadge.vercel.app/api/server/6adMQxSpJS?compact=true&style=flat)](https://discord.gg/6adMQxSpJS)
[![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI)](https://twitter.com/langchainai)
Looking for the JS/TS library? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).
@@ -37,25 +38,24 @@ conda install langchain -c conda-forge
For these applications, LangChain simplifies the entire application lifecycle:
- **Open-source libraries**: Build your applications using LangChain's open-source [building blocks](https://python.langchain.com/v0.2/docs/concepts#langchain-expression-language-lcel), [components](https://python.langchain.com/v0.2/docs/concepts), and [third-party integrations](https://python.langchain.com/v0.2/docs/integrations/platforms/).
Use [LangGraph](/docs/concepts/#langgraph) to build stateful agents with first-class streaming and human-in-the-loop support.
- **Open-source libraries**: Build your applications using LangChain's [modular building blocks](https://python.langchain.com/v0.2/docs/concepts/#langchain-expression-language-lcel) and [components](https://python.langchain.com/v0.2/docs/concepts/#components). Integrate with hundreds of [third-party providers](https://python.langchain.com/v0.2/docs/integrations/platforms/).
- **Productionization**: Inspect, monitor, and evaluate your apps with [LangSmith](https://docs.smith.langchain.com/) so that you can constantly optimize and deploy with confidence.
- **Deployment**: Turn your LangGraph applications into production-ready APIs and Assistants with [LangGraph Cloud](https://langchain-ai.github.io/langgraph/cloud/).
- **Deployment**: Turn any chain into a REST API with [LangServe](https://python.langchain.com/v0.2/docs/langserve/).
### Open-source libraries
- **`langchain-core`**: Base abstractions and LangChain Expression Language.
- **`langchain-community`**: Third party integrations.
- Some integrations have been further split into **partner packages** that only rely on **`langchain-core`**. Examples include **`langchain_openai`** and **`langchain_anthropic`**.
- **`langchain`**: Chains, agents, and retrieval strategies that make up an application's cognitive architecture.
- **[`LangGraph`](https://langchain-ai.github.io/langgraph/)**: A library for building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph. Integrates smoothly with LangChain, but can be used without it.
- **[`LangGraph`](https://langchain-ai.github.io/langgraph/)**: A library for building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
### Productionization:
- **[LangSmith](https://docs.smith.langchain.com/)**: A developer platform that lets you debug, test, evaluate, and monitor chains built on any LLM framework and seamlessly integrates with LangChain.
### Deployment:
- **[LangGraph Cloud](https://langchain-ai.github.io/langgraph/cloud/)**: Turn your LangGraph applications into production-ready APIs and Assistants.
- **[LangServe](https://python.langchain.com/v0.2/docs/langserve/)**: A library for deploying LangChain chains as REST APIs.
![Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers.](docs/static/svg/langchain_stack_062024.svg "LangChain Architecture Overview")
![Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers.](docs/static/svg/langchain_stack.svg "LangChain Architecture Overview")
## 🧱 What can you build with LangChain?
@@ -106,7 +106,7 @@ Retrieval Augmented Generation involves [loading data](https://python.langchain.
**🤖 Agents**
Agents allow an LLM autonomy over how a task is accomplished. Agents make decisions about which Actions to take, then take that Action, observe the result, and repeat until the task is complete. LangChain provides a [standard interface for agents](https://python.langchain.com/v0.2/docs/concepts/#agents), along with [LangGraph](https://github.com/langchain-ai/langgraph) for building custom agents.
Agents allow an LLM autonomy over how a task is accomplished. Agents make decisions about which Actions to take, then take that Action, observe the result, and repeat until the task is complete. LangChain provides a [standard interface for agents](https://python.langchain.com/v0.2/docs/concepts/#agents) along with the [LangGraph](https://github.com/langchain-ai/langgraph) extension for building custom agents.
## 📖 Documentation
@@ -120,9 +120,10 @@ Please see [here](https://python.langchain.com) for full documentation, which in
## 🌐 Ecosystem
- [🦜🛠️ LangSmith](https://docs.smith.langchain.com/): Trace and evaluate your language model applications and intelligent agents to help you move from prototype to production.
- [🦜🕸️ LangGraph](https://langchain-ai.github.io/langgraph/): Create stateful, multi-actor applications with LLMs. Integrates smoothly with LangChain, but can be used without it.
- [🦜🏓 LangServe](https://python.langchain.com/docs/langserve): Deploy LangChain runnables and chains as REST APIs.
- [🦜🛠️ LangSmith](https://docs.smith.langchain.com/): Tracing and evaluating your language model applications and intelligent agents to help you move from prototype to production.
- [🦜🕸️ LangGraph](https://langchain-ai.github.io/langgraph/): Creating stateful, multi-actor applications with LLMs, built on top of (and intended to be used with) LangChain primitives.
- [🦜🏓 LangServe](https://python.langchain.com/docs/langserve): Deploying LangChain runnables and chains as REST APIs.
- [LangChain Templates](https://python.langchain.com/v0.2/docs/templates/): Example applications hosted with LangServe.
## 💁 Contributing

View File

@@ -3,7 +3,6 @@
.. NOTE:: {{objname}} implements the standard :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>`. 🏃
The :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>` has additional methods that are available on runnables, such as :py:meth:`with_types <langchain_core.runnables.base.Runnable.with_types>`, :py:meth:`with_retry <langchain_core.runnables.base.Runnable.with_retry>`, :py:meth:`assign <langchain_core.runnables.base.Runnable.assign>`, :py:meth:`bind <langchain_core.runnables.base.Runnable.bind>`, :py:meth:`get_graph <langchain_core.runnables.base.Runnable.get_graph>`, and more.
.. currentmodule:: {{ module }}

View File

@@ -3,8 +3,6 @@
.. NOTE:: {{objname}} implements the standard :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>`. 🏃
The :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>` has additional methods that are available on runnables, such as :py:meth:`with_types <langchain_core.runnables.base.Runnable.with_types>`, :py:meth:`with_retry <langchain_core.runnables.base.Runnable.with_retry>`, :py:meth:`assign <langchain_core.runnables.base.Runnable.assign>`, :py:meth:`bind <langchain_core.runnables.base.Runnable.bind>`, :py:meth:`get_graph <langchain_core.runnables.base.Runnable.get_graph>`, and more.
.. currentmodule:: {{ module }}
.. autopydantic_model:: {{ objname }}
@@ -19,6 +17,6 @@
:member-order: groupwise
:show-inheritance: True
:special-members: __call__
:exclude-members: construct, copy, dict, from_orm, parse_file, parse_obj, parse_raw, schema, schema_json, update_forward_refs, validate, json, is_lc_serializable, to_json_not_implemented, lc_secrets, lc_attributes, lc_id, get_lc_namespace, astream_log, transform, atransform, get_output_schema, get_prompts, config_schema, map, pick, pipe, with_listeners, with_alisteners, with_config, with_fallbacks, with_types, with_retry, InputType, OutputType, config_specs, output_schema, get_input_schema, get_graph, get_name, input_schema, name, bind, assign
:exclude-members: construct, copy, dict, from_orm, parse_file, parse_obj, parse_raw, schema, schema_json, update_forward_refs, validate, json, is_lc_serializable, to_json, to_json_not_implemented, lc_secrets, lc_attributes, lc_id, get_lc_namespace, invoke, ainvoke, batch, abatch, batch_as_completed, abatch_as_completed, astream_log, stream, astream, astream_events, transform, atransform, get_output_schema, get_prompts, configurable_fields, configurable_alternatives, config_schema, map, pick, pipe, with_listeners, with_alisteners, with_config, with_fallbacks, with_types, with_retry, InputType, OutputType, config_specs, output_schema, get_input_schema, get_graph, get_name, input_schema, name, bind, assign
.. example_links:: {{ objname }}

File diff suppressed because it is too large.

View File

@@ -51,8 +51,8 @@ A developer platform that lets you debug, test, evaluate, and monitor LLM applic
<ThemedImage
alt="Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers."
sources={{
light: useBaseUrl('/svg/langchain_stack_062024.svg'),
dark: useBaseUrl('/svg/langchain_stack_062024_dark.svg'),
light: useBaseUrl('/svg/langchain_stack.svg'),
dark: useBaseUrl('/svg/langchain_stack_dark.svg'),
}}
title="LangChain Framework Overview"
/>
@@ -85,16 +85,11 @@ Input and output schemas give every LCEL chain Pydantic and JSONSchema schemas i
As your chains get more and more complex, it becomes increasingly important to understand what exactly is happening at every step.
With LCEL, **all** steps are automatically logged to [LangSmith](https://docs.smith.langchain.com/) for maximum observability and debuggability.
LCEL aims to provide consistency around behavior and customization over legacy subclassed chains such as `LLMChain` and
`ConversationalRetrievalChain`. Many of these legacy chains hide important details like prompts, and as a wider variety
of viable models emerge, customization has become more and more important.
If you are currently using one of these legacy chains, please see [this guide for guidance on how to migrate](/docs/how_to/migrate_chains/).
For guides on how to do specific tasks with LCEL, check out [the relevant how-to guides](/docs/how_to/#langchain-expression-language-lcel).
[**Seamless LangServe deployment**](/docs/langserve)
Any chain created with LCEL can be easily deployed using [LangServe](/docs/langserve).
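A rough sketch of that deployment path, assuming `fastapi`, `langserve`, and `uvicorn` are installed (the chain here is a trivial stand-in for any LCEL chain):

```python
from fastapi import FastAPI
from langchain_core.runnables import RunnableLambda
from langserve import add_routes

chain = RunnableLambda(lambda text: text.upper())  # stand-in for any LCEL chain

app = FastAPI()
add_routes(app, chain, path="/shout")  # exposes /shout/invoke, /shout/batch, /shout/stream
# run with: uvicorn my_app:app  (module name is hypothetical)
```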
### Runnable interface
<span data-heading-keywords="invoke,runnable"></span>
<span data-heading-keywords="invoke"></span>
To make it as easy as possible to create custom chains, we've implemented a ["Runnable"](https://api.python.langchain.com/en/stable/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable) protocol. Many LangChain components implement the `Runnable` protocol, including chat models, LLMs, output parsers, retrievers, prompt templates, and more. There are also several useful primitives for working with runnables, which you can read about below.
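A minimal sketch of that shared interface, composing two function-backed runnables with `|` and calling the standard methods:

```python
from langchain_core.runnables import RunnableLambda

shout = RunnableLambda(lambda text: text.upper())
exclaim = RunnableLambda(lambda text: text + "!")
chain = shout | exclaim  # the composition is itself a Runnable

print(chain.invoke("hello"))       # HELLO!
print(chain.batch(["hi", "bye"]))  # ['HI!', 'BYE!']
for chunk in chain.stream("hey"):
    print(chunk)                   # HEY!
```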
@@ -521,8 +516,6 @@ Generally, when designing tools to be used by a chat model or LLM, it is importa
For specifics on how to use tools, see the [relevant how-to guides here](/docs/how_to/#tools).
To use an existing pre-built tool, see [here](docs/integrations/tools/) for a list of pre-built tools.
### Toolkits
Toolkits are collections of tools that are designed to be used together for specific tasks. They have convenient loading methods.
@@ -557,28 +550,6 @@ If you are still using AgentExecutor, do not fear: we still have a guide on [how
It is recommended, however, that you start to transition to LangGraph.
In order to assist in this we have put together a [transition guide on how to do so](/docs/how_to/migrate_agent).
#### ReAct agents
<span data-heading-keywords="react,react agent"></span>
One popular architecture for building agents is [**ReAct**](https://arxiv.org/abs/2210.03629).
ReAct combines reasoning and acting in an iterative process - in fact the name "ReAct" stands for "Reason" and "Act".
The general flow looks like this:
- The model will "think" about what step to take in response to an input and any previous observations.
- The model will then choose an action from available tools (or choose to respond to the user).
- The model will generate arguments to that tool.
- The agent runtime (executor) will parse out the chosen tool and call it with the generated arguments.
- The executor will return the results of the tool call back to the model as an observation.
- This process repeats until the agent chooses to respond.
There are general prompting based implementations that do not require any model-specific features, but the most
reliable implementations use features like [tool calling](/docs/how_to/tool_calling/) to reliably format outputs
and reduce variance.
Please see the [LangGraph documentation](https://langchain-ai.github.io/langgraph/) for more information,
or [this how-to guide](/docs/how_to/migrate_agent/) for specific information on migrating to LangGraph.
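The flow above can be sketched as a framework-free loop; everything here (the decision shape, `call_model`, the tool registry) is a placeholder rather than a LangChain API:

```python
def run_react_loop(question, call_model, tools, max_steps=10):
    """Schematic ReAct loop: think, act, observe, repeat (placeholder names throughout)."""
    observations = []
    for _ in range(max_steps):
        decision = call_model(question, observations)  # the model "thinks" and picks an action
        if decision["action"] == "respond":
            return decision["answer"]                  # ...or chooses to answer the user
        tool = tools[decision["action"]]               # executor parses out the chosen tool
        result = tool(**decision["args"])              # ...and calls it with the generated arguments
        observations.append(result)                    # tool output is fed back as an observation
    raise RuntimeError("agent did not finish within max_steps")
```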
### Callbacks
LangChain provides a callbacks system that allows you to hook into the various stages of your LLM application. This is useful for logging, monitoring, streaming, and other tasks.
@@ -783,54 +754,14 @@ a few ways to get structured output from models in LangChain.
#### `.with_structured_output()`
For convenience, some LangChain chat models support a [`.with_structured_output()`](/docs/how_to/structured_output/#the-with_structured_output-method)
method. This method only requires a schema as input, and returns a dict or Pydantic object.
For convenience, some LangChain chat models support a `.with_structured_output()` method.
This method only requires a schema as input, and returns a dict or Pydantic object.
Generally, this method is only present on models that support one of the more advanced methods described below,
and will use one of them under the hood. It takes care of importing a suitable output parser and
formatting the schema in the right format for the model.
Here's an example:
```python
from typing import Optional
from langchain_core.pydantic_v1 import BaseModel, Field
class Joke(BaseModel):
"""Joke to tell user."""
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
rating: Optional[int] = Field(description="How funny the joke is, from 1 to 10")
structured_llm = llm.with_structured_output(Joke)
structured_llm.invoke("Tell me a joke about cats")
```
```
Joke(setup='Why was the cat sitting on the computer?', punchline='To keep an eye on the mouse!', rating=None)
```
We recommend this method as a starting point when working with structured output:
- It uses other model-specific features under the hood, without the need to import an output parser.
- For the models that use tool calling, no special prompting is needed.
- If multiple underlying techniques are supported, you can supply a `method` parameter to
[toggle which one is used](/docs/how_to/structured_output/#advanced-specifying-the-method-for-structuring-outputs).
You may want or need to use other techniques if:
- The chat model you are using does not support tool calling.
- You are working with very complex schemas and the model is having trouble generating outputs that conform.
For more information, check out this [how-to guide](/docs/how_to/structured_output/#the-with_structured_output-method).
You can also check out [this table](/docs/integrations/chat/#advanced-features) for a list of models that support
`with_structured_output()`.
#### Raw prompting
The most intuitive way to get a model to structure output is to ask nicely.
@@ -853,8 +784,9 @@ for smooth parsing can be surprisingly difficult and model-specific.
Some may be better at interpreting [JSON schema](https://json-schema.org/), others may be best with TypeScript definitions,
and still others may prefer XML.
While features offered by model providers may increase reliability, prompting techniques remain important for tuning your
results no matter which method you choose.
While we'll next go over some ways that you can take advantage of features offered by
model providers to increase reliability, prompting techniques remain important for tuning your
results no matter what method you choose.
#### JSON mode
<span data-heading-keywords="json mode"></span>
@@ -864,11 +796,10 @@ Some models, such as [Mistral](/docs/integrations/chat/mistralai/), [OpenAI](/do
support a feature called **JSON mode**, usually enabled via config.
When enabled, JSON mode will constrain the model's output to always be some sort of valid JSON.
Often they require some custom prompting, but it's usually much less burdensome than completely raw prompting and
more along the lines of, `"you must always return JSON"`. The [output is also generally easier to parse](/docs/how_to/output_parser_json/).
Often they require some custom prompting, but it's usually much less burdensome and along the lines of,
`"you must always return JSON"`, and the [output is easier to parse](/docs/how_to/output_parser_json/).
It's also generally simpler to use directly and more commonly available than tool calling, and can give
more flexibility around prompting and shaping results than tool calling.
It's also generally simpler and more commonly available than tool calling.
Here's an example:
@@ -1022,7 +953,7 @@ See our [blog post overview](https://blog.langchain.dev/query-construction/) and
#### Indexing
Fourth, consider the design of your document index. A simple and powerful idea is to **decouple the documents that you index for retrieval from the documents that you pass to the LLM for generation.** Indexing frequently uses embedding models with vector stores, which [compress the semantic information in documents to fixed-size vectors](/docs/concepts/#embedding-models).
Fouth, consider the design of your document index. A simple and powerful idea is to **decouple the documents that you index for retrieval from the documents that you pass to the LLM for generation.** Indexing frequently uses embedding models with vector stores, which [compress the semantic information in documents to fixed-size vectors](/docs/concepts/#embedding-models).
Many RAG approaches focus on splitting documents into chunks and retrieving some number based on similarity to an input question for the LLM. But chunk size and chunk number can be difficult to set and affect results if they do not provide full context for the LLM to answer a question. Furthermore, LLMs are increasingly capable of processing millions of tokens.
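A toy sketch of that decoupling: retrieval runs over short summaries, while generation receives the full parent document (the word-overlap scorer below stands in for an embedding model):

```python
docstore = {
    "doc-1": "FULL TEXT: a long report about solar panel maintenance ...",
    "doc-2": "FULL TEXT: a long report about wind turbine inspection ...",
}
summaries = {
    "doc-1": "solar panel maintenance",
    "doc-2": "wind turbine inspection",
}

def score(query: str, text: str) -> int:
    # Placeholder for embedding similarity; word overlap keeps the sketch runnable.
    return len(set(query.lower().split()) & set(text.lower().split()))

def retrieve_full_doc(query: str) -> str:
    best_id = max(summaries, key=lambda doc_id: score(query, summaries[doc_id]))
    return docstore[best_id]  # the LLM sees the full document, not the indexed summary

print(retrieve_full_doc("how do I maintain solar panels?"))
```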
@@ -1102,7 +1033,7 @@ See several videos and cookbooks showcasing RAG with LangGraph:
- [Cookbooks for RAG using LangGraph](https://github.com/langchain-ai/langgraph/tree/main/examples/rag)
See our LangGraph RAG recipes with partners:
- [Meta](https://github.com/meta-llama/llama-recipes/tree/main/recipes/3p_integrations/langchain)
- [Meta](https://github.com/meta-llama/llama-recipes/tree/main/recipes/use_cases/agents/langchain)
- [Mistral](https://github.com/mistralai/cookbook/tree/main/third_party/langchain)
:::
@@ -1130,7 +1061,7 @@ Table columns:
| Token | [many classes](/docs/how_to/split_by_token/) | Tokens | | Splits text on tokens. There exist a few different ways to measure tokens. |
| Character | [CharacterTextSplitter](/docs/how_to/character_text_splitter/) | A user defined character | | Splits text based on a user defined character. One of the simpler methods. |
| Semantic Chunker (Experimental) | [SemanticChunker](/docs/how_to/semantic-chunker/) | Sentences | | First splits on sentences. Then combines ones next to each other if they are semantically similar enough. Taken from [Greg Kamradt](https://github.com/FullStackRetrieval-com/RetrievalTutorials/blob/main/tutorials/LevelsOfTextSplitting/5_Levels_Of_Text_Splitting.ipynb) |
| Integration: AI21 Semantic | [AI21SemanticTextSplitter](/docs/integrations/document_transformers/ai21_semantic_text_splitter/) | | ✅ | Identifies distinct topics that form coherent pieces of text and splits along those. |
| Integration: AI21 Semantic | [AI21SemanticTextSplitter](/docs/integrations/document_transformers/ai21_semantic_text_splitter/) | ✅ | Identifies distinct topics that form coherent pieces of text and splits along those. |
### Evaluation
<span data-heading-keywords="evaluation,evaluate"></span>
@@ -1148,13 +1079,3 @@ This process is vital for building reliable applications.
- It allows you to track results over time and automatically run your evaluators on a schedule or as part of CI/Code
To learn more, check out [this LangSmith guide](https://docs.smith.langchain.com/concepts/evaluation).
### Tracing
<span data-heading-keywords="trace,tracing"></span>
A trace is essentially a series of steps that your application takes to go from input to output.
Traces contain individual steps called `runs`. These can be individual calls from a model, retriever,
tool, or sub-chains.
Tracing gives you observability inside your chains and agents, and is vital in diagnosing issues.
For a deeper dive, check out [this LangSmith conceptual guide](https://docs.smith.langchain.com/concepts/tracing).

View File

@@ -13,15 +13,15 @@ organization and structure.
LangChain's documentation follows the [Diataxis framework](https://diataxis.fr).
Under this framework, all documentation falls under one of four categories: [Tutorials](/docs/contributing/documentation/style_guide/#tutorials),
[How-to guides](/docs/contributing/documentation/style_guide/#how-to-guides),
[References](/docs/contributing/documentation/style_guide/#references), and [Explanations](/docs/contributing/documentation/style_guide/#conceptual-guide).
[References]/docs/contributing/documentation/style_guide/#references, and [Explanations](/docs/contributing/documentation/style_guide/#conceptual-guide).
### Tutorials
Tutorials are lessons that take the reader through a practical activity. Their purpose is to help the user
gain understanding of concepts and how they interact by showing one way to achieve some goal in a hands-on way. They should **avoid** giving
multiple permutations of ways to achieve that goal in-depth. Instead, it should guide a new user through a recommended path to accomplishing the tutorial's goal. While the end result of a tutorial does not necessarily need to
be completely production-ready, it should be useful and practically satisfy the goal that you clearly stated in the tutorial's introduction. Information on how to address additional scenarios
belongs in how-to guides.
gain understanding of concepts and how they interact by showing one way to achieve some goal in a hands-on way. They should not cover
multiple permutations of ways to achieve that goal in-depth, and the end result of a tutorial does not need to
be completely production-ready against all cases. Information on how to address additional scenarios
can occur in how-to guides.
To quote the Diataxis website:
@@ -33,22 +33,20 @@ Some examples include:
- [Build a Simple LLM Application with LCEL](/docs/tutorials/llm_chain/)
- [Build a Retrieval Augmented Generation (RAG) App](/docs/tutorials/rag/)
A good structural rule of thumb is to follow the structure of this [example from Numpy](https://numpy.org/numpy-tutorials/content/tutorial-svd.html).
Here are some high-level tips on writing a good tutorial:
- Focus on guiding the user to get something done, but keep in mind the end-goal is more to impart principles than to create a perfect production system.
- Be specific, not abstract and follow one path.
- No need to go deeply into alternative approaches, but it's ok to reference them, ideally with a link to an appropriate how-to guide.
- Get "a point on the board" as soon as possible - something the user can run that outputs something.
- You can iterate and expand afterwards.
- Try to frequently checkpoint at given steps where the user can run code and see progress.
- Focus on results, not technical explanation.
- Crosslink heavily to appropriate conceptual/reference pages.
- The first time you mention a LangChain concept, use its full name (e.g. "LangChain Expression Language (LCEL)"), and link to its conceptual/other documentation page.
- It's also helpful to add a prerequisite callout that links to any pages with necessary background information.
- End with a recap/next steps section summarizing what the tutorial covered and future reading, such as related how-to guides.
- Focus on guiding the user to get something done, but keep in mind the end-goal is more to impart principles than to create a perfect production system
- Be specific, not abstract and follow one path
- No need to go deeply into alternative approaches, but it's ok to reference them, ideally with a link to an appropriate how-to guide
- Get "a point on the board" as soon as possible - something the user can run that outputs something
- You can iterate and expand afterwards
- Try to frequently checkpoint at given steps where the user can run code and see progress
- Focus on results, not technical explanation
- Crosslink heavily to appropriate conceptual/reference pages
- The first time you mention a LangChain concept, use its full name (e.g. "LangChain Expression Language (LCEL)"), and link to its conceptual/other documentation page
- It's also helpful to add a prerequisite callout that links to any pages with necessary background information
- End with a recap/next steps section summarizing what the tutorial covered and future reading, such as related how-to guides
### How-to guides
@@ -68,21 +66,20 @@ Some examples include:
Here are some high-level tips on writing a good how-to guide:
- Clearly explain what you are guiding the user through at the start.
- Assume higher intent than a tutorial and show what the user needs to do to get that task done.
- Assume familiarity of concepts, but explain why suggested actions are helpful.
- Crosslink heavily to conceptual/reference pages.
- Discuss alternatives and responses to real-world tradeoffs that may arise when solving a problem.
- Use lots of example code.
- Prefer full code blocks that the reader can copy and run.
- End with a recap/next steps section summarizing what the tutorial covered and future reading, such as other related how-to guides.
- Clearly explain what you are guiding the user through at the start
- Assume higher intent than a tutorial and show what the user needs to do to get that task done
- Assume familiarity of concepts, but explain why suggested actions are helpful
- Crosslink heavily to conceptual/reference pages
- Discuss alternatives and responses to real-world tradeoffs that may arise when solving a problem
- Use lots of example code
- End with a recap/next steps section summarizing what the tutorial covered and future reading, such as other related how-to guides
### Conceptual guide
LangChain's conceptual guide falls under the **Explanation** quadrant of Diataxis. They should cover LangChain terms and concepts
in a more abstract way than how-to guides or tutorials, and should be geared towards curious users interested in
gaining a deeper understanding of the framework. Try to avoid excessively large code examples - the goal here is to
impart perspective to the user rather than to finish a practical project. These guides should cover **why** things work the way they do.
gaining a deeper understanding of the framework. There should be few, if any, concrete code examples. The goal here is to
impart perspective to the user rather than to finish a practical project.
This guide on documentation style is meant to fall under this category.

View File

@@ -12,7 +12,7 @@ As an open-source project in a rapidly developing field, we are extremely open t
There are many ways to contribute to LangChain. Here are some common ways people contribute:
- [**Documentation**](/docs/contributing/documentation/): Help improve our docs, including this one!
- [**Documentation**](/docs/contributing/documentation/style_guide): Help improve our docs, including this one!
- [**Code**](/docs/contributing/code/): Help us write code, fix bugs, or improve our infrastructure.
- [**Integrations**](integrations.mdx): Help us integrate with your favorite vendors and tools.
- [**Discussions**](https://github.com/langchain-ai/langchain/discussions): Help answer usage questions and discuss issues with users.

View File

@@ -11,7 +11,7 @@ There are a few different places you can contribute integrations for LangChain:
- **Community**: For lighter-weight integrations that are primarily maintained by LangChain and the Open Source Community.
- **Partner Packages**: For independent packages that are co-maintained by LangChain and a partner.
For the most part, **new integrations should be added to the Community package**. Partner packages require more maintenance as separate packages, so please confirm with the LangChain team before creating a new partner package.
For the most part, new integrations should be added to the Community package. Partner packages require more maintenance as separate packages, so please confirm with the LangChain team before creating a new partner package.
In the following sections, we'll walk through how to contribute to each of these packages from a fake company, `Parrot Link AI`.
@@ -60,10 +60,6 @@ And add documentation to:
## Partner package in LangChain repo
:::caution
Before starting a **partner** package, please confirm your intent with the LangChain team. Partner packages require more maintenance as separate packages, so we will close PRs that add new partner packages without prior discussion. See the above section for how to add a community integration.
:::
Partner packages can be hosted in the `LangChain` monorepo or in an external repo.
Partner package in the `LangChain` repo is placed in `libs/partners/{partner}`

View File

@@ -7,7 +7,6 @@ If you plan on contributing to LangChain code or documentation, it can be useful
to understand the high level structure of the repository.
LangChain is organized as a [monorepo](https://en.wikipedia.org/wiki/Monorepo) that contains multiple packages.
You can check out our [installation guide](/docs/how_to/installation/) for more on how they fit together.
Here's the structure visualized as a tree:

View File

@@ -153,7 +153,7 @@
"\n",
"#### OpenAI\n",
"\n",
"For example, OpenAI will return a message [chunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html) at the end of a stream with token usage information. This behavior is supported by `langchain-openai >= 0.1.8` and can be enabled by setting `stream_usage=True`. This attribute can also be set when `ChatOpenAI` is instantiated.\n",
"For example, OpenAI will return a message [chunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html) at the end of a stream with token usage information. This behavior is supported by `langchain-openai >= 0.1.8` and can be enabled by setting `stream_options={\"include_usage\": True}`.\n",
"\n",
"```{=mdx}\n",
":::note\n",
@@ -172,18 +172,18 @@
"name": "stdout",
"output_type": "stream",
"text": [
"content='' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='Hello' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='!' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content=' How' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content=' can' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content=' I' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content=' assist' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content=' you' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content=' today' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='?' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='' response_metadata={'finish_reason': 'stop', 'model_name': 'gpt-3.5-turbo-0125'} id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623' usage_metadata={'input_tokens': 8, 'output_tokens': 9, 'total_tokens': 17}\n"
"content='' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content='Hello' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content='!' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content=' How' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content=' can' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content=' I' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content=' assist' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content=' you' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content=' today' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content='?' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content='' response_metadata={'finish_reason': 'stop'} id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content='' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf' usage_metadata={'input_tokens': 8, 'output_tokens': 9, 'total_tokens': 17}\n"
]
}
],
@@ -191,7 +191,7 @@
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\")\n",
"\n",
"aggregate = None\n",
"for chunk in llm.stream(\"hello\", stream_usage=True):\n",
"for chunk in llm.stream(\"hello\", stream_options={\"include_usage\": True}):\n",
" print(chunk)\n",
" aggregate = chunk if aggregate is None else aggregate + chunk"
]
@@ -229,7 +229,7 @@
"id": "7dba63e8-0ed7-4533-8f0f-78e19c38a25c",
"metadata": {},
"source": [
"To disable streaming token counts for OpenAI, set `stream_usage` to False, or omit it from the parameters:"
"To disable streaming token counts for OpenAI, set `\"include_usage\"` to False in `stream_options`, or omit it from the parameters:"
]
},
{
@@ -242,17 +242,17 @@
"name": "stdout",
"output_type": "stream",
"text": [
"content='' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content='Hello' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content='!' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content=' How' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content=' can' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content=' I' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content=' assist' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content=' you' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content=' today' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content='?' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content='' response_metadata={'finish_reason': 'stop', 'model_name': 'gpt-3.5-turbo-0125'} id='run-8e758550-94b0-4cca-a298-57482793c25d'\n"
"content='' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content='Hello' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content='!' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content=' How' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content=' can' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content=' I' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content=' assist' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content=' you' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content=' today' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content='?' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content='' response_metadata={'finish_reason': 'stop'} id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n"
]
}
],
@@ -267,7 +267,7 @@
"id": "6a5d9617-be3a-419a-9276-de9c29fa50ae",
"metadata": {},
"source": [
"You can also enable streaming token usage by setting `stream_usage` when instantiating the chat model. This can be useful when incorporating chat models into LangChain [chains](/docs/concepts#langchain-expression-language-lcel): usage metadata can be monitored when [streaming intermediate steps](/docs/how_to/streaming#using-stream-events) or using tracing software such as [LangSmith](https://docs.smith.langchain.com/).\n",
"You can also enable streaming token usage by setting `model_kwargs` when instantiating the chat model. This can be useful when incorporating chat models into LangChain [chains](/docs/concepts#langchain-expression-language-lcel): usage metadata can be monitored when [streaming intermediate steps](/docs/how_to/streaming#using-stream-events) or using tracing software such as [LangSmith](https://docs.smith.langchain.com/).\n",
"\n",
"See the below example, where we return output structured to a desired schema, but can still observe token usage streamed from intermediate steps."
]
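A sketch of the instantiation-time pattern described above, assuming the constructor-level toggle named in this version of the guide; aggregated chunks then expose `usage_metadata` without any per-call arguments.

```python
from langchain_openai import ChatOpenAI

# Assumption: this langchain-openai version accepts the constructor-level
# toggle referenced above (older versions pass it via model_kwargs instead).
llm = ChatOpenAI(model="gpt-3.5-turbo-0125", stream_usage=True)

aggregate = None
for chunk in llm.stream("hello"):
    aggregate = chunk if aggregate is None else aggregate + chunk

print(aggregate.usage_metadata)
```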
@@ -275,7 +275,7 @@
{
"cell_type": "code",
"execution_count": 8,
"id": "0b1523d8-127e-4314-82fa-bd97aca37f9a",
"id": "57dec1fb-bd9c-4c98-8798-8fbbe67f6b2c",
"metadata": {},
"outputs": [
{
@@ -301,7 +301,7 @@
"\n",
"llm = ChatOpenAI(\n",
" model=\"gpt-3.5-turbo-0125\",\n",
" stream_usage=True,\n",
" model_kwargs={\"stream_options\": {\"include_usage\": True}},\n",
")\n",
"# Under the hood, .with_structured_output binds tools to the\n",
"# chat model and appends a parser.\n",
@@ -341,7 +341,7 @@
{
"cell_type": "code",
"execution_count": 9,
"id": "b04a4486-72fd-48ce-8f9e-5d281b441195",
"id": "31667d54",
"metadata": {},
"outputs": [
{
@@ -361,11 +361,7 @@
"\n",
"from langchain_community.callbacks.manager import get_openai_callback\n",
"\n",
"llm = ChatOpenAI(\n",
" model=\"gpt-3.5-turbo-0125\",\n",
" temperature=0,\n",
" stream_usage=True,\n",
")\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
"\n",
"with get_openai_callback() as cb:\n",
" result = llm.invoke(\"Tell me a joke\")\n",
@@ -383,14 +379,14 @@
{
"cell_type": "code",
"execution_count": 10,
"id": "05f22a1d-b021-490f-8840-f628a07459f2",
"id": "e09420f4",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"54\n"
"55\n"
]
}
],
@@ -401,29 +397,37 @@
" print(cb.total_tokens)"
]
},
{
"cell_type": "markdown",
"id": "9ac51188-c8f4-4230-90fd-3cd78cdd955d",
"metadata": {},
"source": [
"```{=mdx}\n",
":::note\n",
"Cost information is currently not available in streaming mode. This is because model names are currently not propagated through chunks in streaming mode, and the model name is used to look up the correct pricing. Token counts however are available:\n",
":::\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "c00c9158-7bb4-4279-88e6-ea70f46e6ac2",
"id": "b241069a-265d-4497-af34-b0a5f95ae67f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Tokens Used: 27\n",
"\tPrompt Tokens: 11\n",
"\tCompletion Tokens: 16\n",
"Successful Requests: 1\n",
"Total Cost (USD): $2.95e-05\n"
"28\n"
]
}
],
"source": [
"with get_openai_callback() as cb:\n",
" for chunk in llm.stream(\"Tell me a joke\"):\n",
" for chunk in llm.stream(\"Tell me a joke\", stream_options={\"include_usage\": True}):\n",
" pass\n",
" print(cb)"
" print(cb.total_tokens)"
]
},
{
@@ -453,7 +457,21 @@
")\n",
"tools = load_tools([\"wikipedia\"])\n",
"agent = create_tool_calling_agent(llm, tools, prompt)\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)"
"agent_executor = AgentExecutor(\n",
" agent=agent, tools=tools, verbose=True, stream_runnable=False\n",
")"
]
},
{
"cell_type": "markdown",
"id": "9c1ae74d-8300-4041-9ff4-66093ee592b1",
"metadata": {},
"source": [
"```{=mdx}\n",
":::note\n",
"We have to set `stream_runnable=False` for cost information, as described above. By default the AgentExecutor will stream the underlying agent so that you can get the most granular results when streaming events via AgentExecutor.stream_events.\n",
":::\n",
"```"
]
},
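The cell that produces the token totals below is not visible in this diff; a plausible sketch, inferred from the printed labels and the imports shown earlier (the exact input string is an assumption):

```python
from langchain_community.callbacks.manager import get_openai_callback

# Wrap the agent invocation so usage from every underlying LLM call is
# aggregated into a single callback handler.
with get_openai_callback() as cb:
    response = agent_executor.invoke(
        {
            "input": "What's a hummingbird's scientific name and what's"
            " the fastest bird species?"
        }
    )
    print(f"Total Tokens: {cb.total_tokens}")
    print(f"Prompt Tokens: {cb.prompt_tokens}")
    print(f"Completion Tokens: {cb.completion_tokens}")
    print(f"Total Cost (USD): ${cb.total_cost}")
```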
{
@@ -485,30 +503,36 @@
"\n",
"\n",
"\n",
"Page: Allen's hummingbird\n",
"Summary: Allen's hummingbird (Selasphorus sasin) is a species of hummingbird that breeds in the western United States. It is one of seven species in the genus Selasphorus.\u001b[0m\u001b[32;1m\u001b[1;3m\n",
"Page: Anna's hummingbird\n",
"Summary: Anna's hummingbird (Calypte anna) is a North American species of hummingbird. It was named after Anna Masséna, Duchess of Rivoli.\n",
"It is native to western coastal regions of North America. In the early 20th century, Anna's hummingbirds bred only in northern Baja California and Southern California. The transplanting of exotic ornamental plants in residential areas throughout the Pacific coast and inland deserts provided expanded nectar and nesting sites, allowing the species to expand its breeding range. Year-round residence of Anna's hummingbirds in the Pacific Northwest is an example of ecological release dependent on acclimation to colder winter temperatures, introduced plants, and human provision of nectar feeders during winter.\n",
"These birds feed on nectar from flowers using a long extendable tongue. They also consume small insects and other arthropods caught in flight or gleaned from vegetation.\u001b[0m\u001b[32;1m\u001b[1;3m\n",
"Invoking: `wikipedia` with `{'query': 'fastest bird species'}`\n",
"\n",
"\n",
"\u001b[0m\u001b[36;1m\u001b[1;3mPage: List of birds by flight speed\n",
"Summary: This is a list of the fastest flying birds in the world. A bird's velocity is necessarily variable; a hunting bird will reach much greater speeds while diving to catch prey than when flying horizontally. The bird that can achieve the greatest airspeed is the peregrine falcon (Falco peregrinus), able to exceed 320 km/h (200 mph) in its dives. A close relative of the common swift, the white-throated needletail (Hirundapus caudacutus), is commonly reported as the fastest bird in level flight with a reported top speed of 169 km/h (105 mph). This record remains unconfirmed as the measurement methods have never been published or verified. The record for the fastest confirmed level flight by a bird is 111.5 km/h (69.3 mph) held by the common swift.\n",
"\n",
"\n",
"\n",
"Page: Fastest animals\n",
"Summary: This is a list of the fastest animals in the world, by types of animal.\n",
"\n",
"\n",
"\n",
"Page: Falcon\n",
"Summary: Falcons () are birds of prey in the genus Falco, which includes about 40 species. Falcons are widely distributed on all continents of the world except Antarctica, though closely related raptors did occur there in the Eocene.\n",
"Adult falcons have thin, tapered wings, which enable them to fly at high speed and change direction rapidly. Fledgling falcons, in their first year of flying, have longer flight feathers, which make their configuration more like that of a general-purpose bird such as a broad wing. This makes flying easier while learning the exceptional skills required to be effective hunters as adults.\n",
"The falcons are the largest genus in the Falconinae subfamily of Falconidae, which itself also includes another subfamily comprising caracaras and a few other species. All these birds kill with their beaks, using a tomial \"tooth\" on the side of their beaks—unlike the hawks, eagles, and other birds of prey in the Accipitridae, which use their feet.\n",
"The largest falcon is the gyrfalcon at up to 65 cm in length. The smallest falcon species is the pygmy falcon, which measures just 20 cm. As with hawks and owls, falcons exhibit sexual dimorphism, with the females typically larger than the males, thus allowing a wider range of prey species.\n",
"Some small falcons with long, narrow wings are called \"hobbies\" and some which hover while hunting are called \"kestrels\".\n",
"As is the case with many birds of prey, falcons have exceptional powers of vision; the visual acuity of one species has been measured at 2.6 times that of a normal human. Peregrine falcons have been recorded diving at speeds of 320 km/h (200 mph), making them the fastest-moving creatures on Earth; the fastest recorded dive attained a vertical speed of 390 km/h (240 mph).\u001b[0m\u001b[32;1m\u001b[1;3mThe scientific name for a hummingbird is Trochilidae. The fastest bird species in level flight is the common swift, which holds the record for the fastest confirmed level flight by a bird at 111.5 km/h (69.3 mph). The peregrine falcon is known to exceed speeds of 320 km/h (200 mph) in its dives, making it the fastest bird in terms of diving speed.\u001b[0m\n",
"As is the case with many birds of prey, falcons have exceptional powers of vision; the visual acuity of one species has been measured at 2.6 times that of a normal human. Peregrine falcons have been recorded diving at speeds of 320 km/h (200 mph), making them the fastest-moving creatures on Earth; the fastest recorded dive attained a vertical speed of 390 km/h (240 mph).\u001b[0m\u001b[32;1m\u001b[1;3mThe scientific name for a hummingbird is Trochilidae. The fastest bird species is the peregrine falcon (Falco peregrinus), which can exceed speeds of 320 km/h (200 mph) in its dives.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"Total Tokens: 1675\n",
"Prompt Tokens: 1538\n",
"Completion Tokens: 137\n",
"Total Cost (USD): $0.0009745000000000001\n"
"Total Tokens: 1787\n",
"Prompt Tokens: 1687\n",
"Completion Tokens: 100\n",
"Total Cost (USD): $0.0009935\n"
]
}
],

View File

@@ -1,541 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "9a8bceb3-95bd-4496-bb9e-57655136e070",
"metadata": {},
"source": [
"# How to use Runnables as Tools\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Runnables](/docs/concepts#runnable-interface)\n",
"- [Tools](/docs/concepts#tools)\n",
"- [Agents](/docs/tutorials/agents)\n",
"\n",
":::\n",
"\n",
"Here we will demonstrate how to convert a LangChain `Runnable` into a tool that can be used by agents, chains, or chat models.\n",
"\n",
"## Dependencies\n",
"\n",
"**Note**: this guide requires `langchain-core` >= 0.2.13. We will also use [OpenAI](/docs/integrations/platforms/openai/) for embeddings, but any LangChain embeddings should suffice. We will use a simple [LangGraph](https://langchain-ai.github.io/langgraph/) agent for demonstration purposes."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "92341f48-2c29-4ce9-8ab8-0a7c7a7c98a1",
"metadata": {},
"outputs": [],
"source": [
"%%capture --no-stderr\n",
"%pip install -U langchain-core langchain-openai langgraph"
]
},
{
"cell_type": "markdown",
"id": "2b0dcc1a-48e8-4a81-b920-3563192ce076",
"metadata": {},
"source": [
"LangChain [tools](/docs/concepts#tools) are interfaces that an agent, chain, or chat model can use to interact with the world. See [here](/docs/how_to/#tools) for how-to guides covering tool-calling, built-in tools, custom tools, and more information.\n",
"\n",
"LangChain tools-- instances of [BaseTool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html)-- are [Runnables](/docs/concepts/#runnable-interface) with additional constraints that enable them to be invoked effectively by language models:\n",
"\n",
"- Their inputs are constrained to be serializable, specifically strings and Python `dict` objects;\n",
"- They contain names and descriptions indicating how and when they should be used;\n",
"- They may contain a detailed [args_schema](https://python.langchain.com/v0.2/docs/how_to/custom_tools/) for their arguments. That is, while a tool (as a `Runnable`) might accept a single `dict` input, the specific keys and type information needed to populate a dict should be specified in the `args_schema`.\n",
"\n",
"Runnables that accept string or `dict` input can be converted to tools using the [as_tool](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.as_tool) method, which allows for the specification of names, descriptions, and additional schema information for arguments."
]
},
{
"cell_type": "markdown",
"id": "b4d76680-1b6b-4862-8c4f-22766a1d41f2",
"metadata": {},
"source": [
"## Basic usage\n",
"\n",
"With typed `dict` input:"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "b2cc4231-64a3-4733-a284-932dcbf2fcc3",
"metadata": {},
"outputs": [],
"source": [
"from typing import List\n",
"\n",
"from langchain_core.runnables import RunnableLambda\n",
"from typing_extensions import TypedDict\n",
"\n",
"\n",
"class Args(TypedDict):\n",
" a: int\n",
" b: List[int]\n",
"\n",
"\n",
"def f(x: Args) -> str:\n",
" return str(x[\"a\"] * max(x[\"b\"]))\n",
"\n",
"\n",
"runnable = RunnableLambda(f)\n",
"as_tool = runnable.as_tool(\n",
" name=\"My tool\",\n",
" description=\"Explanation of when to use tool.\",\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "57f2d435-624d-459a-903d-8509fbbde610",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Explanation of when to use tool.\n"
]
},
{
"data": {
"text/plain": [
"{'title': 'My tool',\n",
" 'type': 'object',\n",
" 'properties': {'a': {'title': 'A', 'type': 'integer'},\n",
" 'b': {'title': 'B', 'type': 'array', 'items': {'type': 'integer'}}},\n",
" 'required': ['a', 'b']}"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"print(as_tool.description)\n",
"\n",
"as_tool.args_schema.schema()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "54ae7384-a03d-4fa4-8cdf-9604a4bc39ee",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'6'"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"as_tool.invoke({\"a\": 3, \"b\": [1, 2]})"
]
},
{
"cell_type": "markdown",
"id": "9038f587-4613-4f50-b349-135f9e7e3b15",
"metadata": {},
"source": [
"Without typing information, arg types can be specified via `arg_types`:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "169f733c-4936-497f-8577-ee769dc16b88",
"metadata": {},
"outputs": [],
"source": [
"from typing import Any, Dict\n",
"\n",
"\n",
"def g(x: Dict[str, Any]) -> str:\n",
" return str(x[\"a\"] * max(x[\"b\"]))\n",
"\n",
"\n",
"runnable = RunnableLambda(g)\n",
"as_tool = runnable.as_tool(\n",
" name=\"My tool\",\n",
" description=\"Explanation of when to use tool.\",\n",
" arg_types={\"a\": int, \"b\": List[int]},\n",
")"
]
},
{
"cell_type": "markdown",
"id": "32b1a992-8997-4c98-8eb2-c9fe9431b799",
"metadata": {},
"source": [
"Alternatively, we can add typing information via [Runnable.with_types](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_types):"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "eb102705-89b7-48dc-9158-d36d5f98ae8e",
"metadata": {},
"outputs": [],
"source": [
"as_tool = runnable.with_types(input_type=Args).as_tool(\n",
" name=\"My tool\",\n",
" description=\"Explanation of when to use tool.\",\n",
")"
]
},
{
"cell_type": "markdown",
"id": "7c474d85-4e01-4fae-9bba-0c6c8c26475c",
"metadata": {},
"source": [
"String input is also supported:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "c475282a-58d6-4c2b-af7d-99b73b7d8a13",
"metadata": {},
"outputs": [],
"source": [
"def f(x: str) -> str:\n",
" return x + \"a\"\n",
"\n",
"\n",
"def g(x: str) -> str:\n",
" return x + \"z\"\n",
"\n",
"\n",
"runnable = RunnableLambda(f) | g\n",
"as_tool = runnable.as_tool()"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "ad6d8d96-3a87-40bd-a2ac-44a8acde0a8e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'baz'"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"as_tool.invoke(\"b\")"
]
},
{
"cell_type": "markdown",
"id": "89fdb3a7-d228-48f0-8f73-262af4febb58",
"metadata": {},
"source": [
"## In agents\n",
"\n",
"Below we will incorporate LangChain Runnables as tools in an [agent](/docs/concepts/#agents) application. We will demonstrate with:\n",
"\n",
"- a document [retriever](/docs/concepts/#retrievers);\n",
"- a simple [RAG](/docs/tutorials/rag/) chain, allowing an agent to delegate relevant queries to it.\n",
"\n",
"We first instantiate a chat model that supports [tool calling](/docs/how_to/tool_calling/):\n",
"\n",
"```{=mdx}\n",
"<ChatModelTabs\n",
" customVarName=\"llm\"\n",
"/>\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "d06c9f2a-4475-450f-9106-54db1d99623b",
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
]
},
{
"cell_type": "markdown",
"id": "e8a2038a-d762-4196-b5e3-fdb89c11e71d",
"metadata": {},
"source": [
"Following the [RAG tutorial](/docs/tutorials/rag/), let's first construct a retriever:"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "23d2a47e-6712-4294-81c8-2c1d76b4bb81",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.documents import Document\n",
"from langchain_core.vectorstores import InMemoryVectorStore\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"documents = [\n",
" Document(\n",
" page_content=\"Dogs are great companions, known for their loyalty and friendliness.\",\n",
" ),\n",
" Document(\n",
" page_content=\"Cats are independent pets that often enjoy their own space.\",\n",
" ),\n",
"]\n",
"\n",
"vectorstore = InMemoryVectorStore.from_documents(\n",
" documents, embedding=OpenAIEmbeddings()\n",
")\n",
"\n",
"retriever = vectorstore.as_retriever(\n",
" search_type=\"similarity\",\n",
" search_kwargs={\"k\": 1},\n",
")"
]
},
{
"cell_type": "markdown",
"id": "9ba737ac-43a2-4a6f-b855-5bd0305017f1",
"metadata": {},
"source": [
"We next create use a simple pre-built [LangGraph agent](https://python.langchain.com/v0.2/docs/tutorials/agents/) and provide it the tool:"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "c939cf2a-60e9-4afd-8b47-84d76ccb13f5",
"metadata": {},
"outputs": [],
"source": [
"from langgraph.prebuilt import create_react_agent\n",
"\n",
"tools = [\n",
" retriever.as_tool(\n",
" name=\"pet_info_retriever\",\n",
" description=\"Get information about pets.\",\n",
" )\n",
"]\n",
"agent = create_react_agent(llm, tools)"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "be29437b-a187-4a0a-9a5d-419c56f2434e",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_W8cnfOjwqEn4cFcg19LN9mYD', 'function': {'arguments': '{\"__arg1\":\"dogs\"}', 'name': 'pet_info_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 60, 'total_tokens': 79}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-d7f81de9-1fb7-4caf-81ed-16dcdb0b2ab4-0', tool_calls=[{'name': 'pet_info_retriever', 'args': {'__arg1': 'dogs'}, 'id': 'call_W8cnfOjwqEn4cFcg19LN9mYD'}], usage_metadata={'input_tokens': 60, 'output_tokens': 19, 'total_tokens': 79})]}}\n",
"----\n",
"{'tools': {'messages': [ToolMessage(content=\"[Document(id='86f835fe-4bbe-4ec6-aeb4-489a8b541707', page_content='Dogs are great companions, known for their loyalty and friendliness.')]\", name='pet_info_retriever', tool_call_id='call_W8cnfOjwqEn4cFcg19LN9mYD')]}}\n",
"----\n",
"{'agent': {'messages': [AIMessage(content='Dogs are known for being great companions, known for their loyalty and friendliness.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 134, 'total_tokens': 152}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-9ca5847a-a5eb-44c0-a774-84cc2c5bbc5b-0', usage_metadata={'input_tokens': 134, 'output_tokens': 18, 'total_tokens': 152})]}}\n",
"----\n"
]
}
],
"source": [
"for chunk in agent.stream({\"messages\": [(\"human\", \"What are dogs known for?\")]}):\n",
" print(chunk)\n",
" print(\"----\")"
]
},
{
"cell_type": "markdown",
"id": "96f2ac9c-36f4-4b7a-ae33-f517734c86aa",
"metadata": {},
"source": [
"See [LangSmith trace](https://smith.langchain.com/public/44e438e3-2faf-45bd-b397-5510fc145eb9/r) for the above run."
]
},
{
"cell_type": "markdown",
"id": "a722fd8a-b957-4ba7-b408-35596b76835f",
"metadata": {},
"source": [
"Going further, we can create a simple [RAG](/docs/tutorials/rag/) chain that takes an additional parameter-- here, the \"style\" of the answer."
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "bea518c9-c711-47c2-b8cc-dbd102f71f09",
"metadata": {},
"outputs": [],
"source": [
"from operator import itemgetter\n",
"\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"system_prompt = \"\"\"\n",
"You are an assistant for question-answering tasks.\n",
"Use the below context to answer the question. If\n",
"you don't know the answer, say you don't know.\n",
"Use three sentences maximum and keep the answer\n",
"concise.\n",
"\n",
"Answer in the style of {answer_style}.\n",
"\n",
"Question: {question}\n",
"\n",
"Context: {context}\n",
"\"\"\"\n",
"\n",
"prompt = ChatPromptTemplate.from_messages([(\"system\", system_prompt)])\n",
"\n",
"rag_chain = (\n",
" {\n",
" \"context\": itemgetter(\"question\") | retriever,\n",
" \"question\": itemgetter(\"question\"),\n",
" \"answer_style\": itemgetter(\"answer_style\"),\n",
" }\n",
" | prompt\n",
" | llm\n",
" | StrOutputParser()\n",
")"
]
},
{
"cell_type": "markdown",
"id": "955a23db-5218-4c34-8486-450a2ddb3443",
"metadata": {},
"source": [
"Note that the input schema for our chain contains the required arguments, so it converts to a tool without further specification:"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "2c9f6e61-80ed-4abb-8e77-84de3ccbc891",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'RunnableParallel<context,question,answer_style>Input',\n",
" 'type': 'object',\n",
" 'properties': {'question': {'title': 'Question'},\n",
" 'answer_style': {'title': 'Answer Style'}}}"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"rag_chain.input_schema.schema()"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "a3f9cf5b-8c71-4b0f-902b-f92e028780c9",
"metadata": {},
"outputs": [],
"source": [
"rag_tool = rag_chain.as_tool(\n",
" name=\"pet_expert\",\n",
" description=\"Get information about pets.\",\n",
")"
]
},
{
"cell_type": "markdown",
"id": "4570615b-8f96-4d97-ae01-1c08b14be584",
"metadata": {},
"source": [
"Below we again invoke the agent. Note that the agent populates the required parameters in its `tool_calls`:"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "06409913-a2ad-400f-a202-7b8dd2ef483a",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_17iLPWvOD23zqwd1QVQ00Y63', 'function': {'arguments': '{\"question\":\"What are dogs known for according to pirates?\",\"answer_style\":\"quote\"}', 'name': 'pet_expert'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 28, 'prompt_tokens': 59, 'total_tokens': 87}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-7fef44f3-7bba-4e63-8c51-2ad9c5e65e2e-0', tool_calls=[{'name': 'pet_expert', 'args': {'question': 'What are dogs known for according to pirates?', 'answer_style': 'quote'}, 'id': 'call_17iLPWvOD23zqwd1QVQ00Y63'}], usage_metadata={'input_tokens': 59, 'output_tokens': 28, 'total_tokens': 87})]}}\n",
"----\n",
"{'tools': {'messages': [ToolMessage(content='\"Dogs are known for their loyalty and friendliness, making them great companions for pirates on long sea voyages.\"', name='pet_expert', tool_call_id='call_17iLPWvOD23zqwd1QVQ00Y63')]}}\n",
"----\n",
"{'agent': {'messages': [AIMessage(content='According to pirates, dogs are known for their loyalty and friendliness, making them great companions for pirates on long sea voyages.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 119, 'total_tokens': 146}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5a30edc3-7be0-4743-b980-ca2f8cad9b8d-0', usage_metadata={'input_tokens': 119, 'output_tokens': 27, 'total_tokens': 146})]}}\n",
"----\n"
]
}
],
"source": [
"agent = create_react_agent(llm, [rag_tool])\n",
"\n",
"for chunk in agent.stream(\n",
" {\"messages\": [(\"human\", \"What would a pirate say dogs are known for?\")]}\n",
"):\n",
" print(chunk)\n",
" print(\"----\")"
]
},
{
"cell_type": "markdown",
"id": "96cc9bc3-e79e-49a8-9915-428ea225358b",
"metadata": {},
"source": [
"See [LangSmith trace](https://smith.langchain.com/public/147ae4e6-4dfb-4dd9-8ca0-5c5b954f08ac/r) for the above run."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -16,15 +16,13 @@
"| args_schema | Pydantic BaseModel | Optional but recommended, can be used to provide more information (e.g., few-shot examples) or validation for expected parameters |\n",
"| return_direct | boolean | Only relevant for agents. When True, after invoking the given tool, the agent will stop and return the result direcly to the user. |\n",
"\n",
"LangChain supports the creation of tools from:\n",
"LangChain provides 3 ways to create tools:\n",
"\n",
"1. Functions;\n",
"2. LangChain [Runnables](/docs/concepts#runnable-interface);\n",
"1. Using [@tool decorator](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html#langchain_core.tools.tool) -- the simplest way to define a custom tool.\n",
"2. Using [StructuredTool.from_function](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.StructuredTool.html#langchain_core.tools.StructuredTool.from_function) class method -- this is similar to the `@tool` decorator, but allows more configuration and specification of both sync and async implementations.\n",
"3. By sub-classing from [BaseTool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html) -- This is the most flexible method, it provides the largest degree of control, at the expense of more effort and code.\n",
"\n",
"Creating tools from functions may be sufficient for most use cases, and can be done via a simple [@tool decorator](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html#langchain_core.tools.tool). If more configuration is needed-- e.g., specification of both sync and async implementations-- one can also use the [StructuredTool.from_function](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.StructuredTool.html#langchain_core.tools.StructuredTool.from_function) class method.\n",
"\n",
"In this guide we provide an overview of these methods.\n",
"The `@tool` or the `StructuredTool.from_function` class method should be sufficient for most use cases.\n",
"\n",
":::{.callout-tip}\n",
"\n",
@@ -37,9 +35,7 @@
"id": "c7326b23",
"metadata": {},
"source": [
"## Creating tools from functions\n",
"\n",
"### @tool decorator\n",
"## @tool decorator\n",
"\n",
"This `@tool` decorator is the simplest way to define a custom tool. The decorator uses the function name as the tool name by default, but this can be overridden by passing a string as the first argument. Additionally, the decorator will use the function's docstring as the tool's description - so a docstring MUST be provided. "
]
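The code cell behind the output below is not shown in this hunk; a minimal sketch of the decorator usage just described (the `multiply` example mirrors the output that follows):

```python
from langchain_core.tools import tool


@tool
def multiply(a: int, b: int) -> int:
    """Multiply two numbers."""
    return a * b


# The tool name defaults to the function name and the description is derived
# from the docstring.
print(multiply.name)
print(multiply.description)
print(multiply.args)
```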
@@ -55,7 +51,7 @@
"output_type": "stream",
"text": [
"multiply\n",
"Multiply two numbers.\n",
"multiply(a: int, b: int) -> int - Multiply two numbers.\n",
"{'a': {'title': 'A', 'type': 'integer'}, 'b': {'title': 'B', 'type': 'integer'}}\n"
]
}
@@ -100,57 +96,6 @@
" return a * b"
]
},
{
"cell_type": "markdown",
"id": "8f0edc51-c586-414c-8941-c8abe779943f",
"metadata": {},
"source": [
"Note that `@tool` supports parsing of annotations, nested schemas, and other features:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "5626423f-053e-4a66-adca-1d794d835397",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'multiply_by_maxSchema',\n",
" 'description': 'Multiply a by the maximum of b.',\n",
" 'type': 'object',\n",
" 'properties': {'a': {'title': 'A',\n",
" 'description': 'scale factor',\n",
" 'type': 'string'},\n",
" 'b': {'title': 'B',\n",
" 'description': 'list of ints over which to take maximum',\n",
" 'type': 'array',\n",
" 'items': {'type': 'integer'}}},\n",
" 'required': ['a', 'b']}"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from typing import Annotated, List\n",
"\n",
"\n",
"@tool\n",
"def multiply_by_max(\n",
" a: Annotated[str, \"scale factor\"],\n",
" b: Annotated[List[int], \"list of ints over which to take maximum\"],\n",
") -> int:\n",
" \"\"\"Multiply a by the maximum of b.\"\"\"\n",
" return a * max(b)\n",
"\n",
"\n",
"multiply_by_max.args_schema.schema()"
]
},
{
"cell_type": "markdown",
"id": "98d6eee9",
@@ -161,7 +106,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 3,
"id": "9216d03a-f6ea-4216-b7e1-0661823a4c0b",
"metadata": {},
"outputs": [
@@ -170,7 +115,7 @@
"output_type": "stream",
"text": [
"multiplication-tool\n",
"Multiply two numbers.\n",
"multiplication-tool(a: int, b: int) -> int - Multiply two numbers.\n",
"{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}\n",
"True\n"
]
@@ -198,84 +143,19 @@
"print(multiply.return_direct)"
]
},
{
"cell_type": "markdown",
"id": "33a9e94d-0b60-48f3-a4c2-247dce096e66",
"metadata": {},
"source": [
"#### Docstring parsing"
]
},
{
"cell_type": "markdown",
"id": "6d0cb586-93d4-4ff1-9779-71df7853cb68",
"metadata": {},
"source": [
"`@tool` can optionally parse [Google Style docstrings](https://google.github.io/styleguide/pyguide.html#383-functions-and-methods) and associate the docstring components (such as arg descriptions) to the relevant parts of the tool schema. To toggle this behavior, specify `parse_docstring`:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "336f5538-956e-47d5-9bde-b732559f9e61",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'fooSchema',\n",
" 'description': 'The foo.',\n",
" 'type': 'object',\n",
" 'properties': {'bar': {'title': 'Bar',\n",
" 'description': 'The bar.',\n",
" 'type': 'string'},\n",
" 'baz': {'title': 'Baz', 'description': 'The baz.', 'type': 'integer'}},\n",
" 'required': ['bar', 'baz']}"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"@tool(parse_docstring=True)\n",
"def foo(bar: str, baz: int) -> str:\n",
" \"\"\"The foo.\n",
"\n",
" Args:\n",
" bar: The bar.\n",
" baz: The baz.\n",
" \"\"\"\n",
" return bar\n",
"\n",
"\n",
"foo.args_schema.schema()"
]
},
{
"cell_type": "markdown",
"id": "f18a2503-5393-421b-99fa-4a01dd824d0e",
"metadata": {},
"source": [
":::{.callout-caution}\n",
"By default, `@tool(parse_docstring=True)` will raise `ValueError` if the docstring does not parse correctly. See [API Reference](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html) for detail and examples.\n",
":::"
]
},
{
"cell_type": "markdown",
"id": "b63fcc3b",
"metadata": {},
"source": [
"### StructuredTool\n",
"## StructuredTool\n",
"\n",
"The `StrurcturedTool.from_function` class method provides a bit more configurability than the `@tool` decorator, without requiring much additional code."
]
},
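The corresponding code cell is not visible in this hunk; a minimal sketch of `StructuredTool.from_function` using the same `multiply` example (the async variant is an assumption illustrating the extra configurability mentioned above):

```python
from langchain_core.tools import StructuredTool


def multiply(a: int, b: int) -> int:
    """Multiply two numbers."""
    return a * b


async def amultiply(a: int, b: int) -> int:
    """Multiply two numbers."""
    return a * b


# from_function accepts separate sync and async implementations, plus optional
# name/description/args_schema overrides.
calculator = StructuredTool.from_function(func=multiply, coroutine=amultiply)
print(calculator.invoke({"a": 2, "b": 3}))
```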
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 4,
"id": "564fbe6f-11df-402d-b135-ef6ff25e1e63",
"metadata": {},
"outputs": [
@@ -318,7 +198,7 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 5,
"id": "6bc055d4-1fbe-4db5-8881-9c382eba6b1b",
"metadata": {},
"outputs": [
@@ -328,7 +208,7 @@
"text": [
"6\n",
"Calculator\n",
"multiply numbers\n",
"Calculator(a: int, b: int) -> int - multiply numbers\n",
"{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}\n"
]
}
@@ -359,63 +239,6 @@
"print(calculator.args)"
]
},
{
"cell_type": "markdown",
"id": "5517995d-54e3-449b-8fdb-03561f5e4647",
"metadata": {},
"source": [
"## Creating tools from Runnables\n",
"\n",
"LangChain [Runnables](/docs/concepts#runnable-interface) that accept string or `dict` input can be converted to tools using the [as_tool](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.as_tool) method, which allows for the specification of names, descriptions, and additional schema information for arguments.\n",
"\n",
"Example usage:"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "8ef593c5-cf72-4c10-bfc9-7d21874a0c24",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'answer_style': {'title': 'Answer Style', 'type': 'string'}}"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.language_models import GenericFakeChatModel\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [(\"human\", \"Hello. Please respond in the style of {answer_style}.\")]\n",
")\n",
"\n",
"# Placeholder LLM\n",
"llm = GenericFakeChatModel(messages=iter([\"hello matey\"]))\n",
"\n",
"chain = prompt | llm | StrOutputParser()\n",
"\n",
"as_tool = chain.as_tool(\n",
" name=\"Style responder\", description=\"Description of when to use tool.\"\n",
")\n",
"as_tool.args"
]
},
{
"cell_type": "markdown",
"id": "0521b787-a146-45a6-8ace-ae1ac4669dd7",
"metadata": {},
"source": [
"See [this guide](/docs/how_to/convert_runnable_to_tool) for more detail."
]
},
{
"cell_type": "markdown",
"id": "b840074b-9c10-4ca0-aed8-626c52b2398f",
@@ -428,7 +251,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 16,
"id": "1dad8f8e",
"metadata": {},
"outputs": [],
@@ -477,7 +300,7 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 7,
"id": "bb551c33",
"metadata": {},
"outputs": [
@@ -528,7 +351,7 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 8,
"id": "6615cb77-fd4c-4676-8965-f92cc71d4944",
"metadata": {},
"outputs": [
@@ -560,7 +383,7 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 9,
"id": "bb2af583-eadd-41f4-a645-bf8748bd3dcd",
"metadata": {},
"outputs": [
@@ -605,7 +428,7 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 10,
"id": "4ad0932c-8610-4278-8c57-f9218f654c8a",
"metadata": {},
"outputs": [
@@ -650,7 +473,7 @@
},
{
"cell_type": "code",
"execution_count": 15,
"execution_count": 11,
"id": "7094c0e8-6192-4870-a942-aad5b5ae48fd",
"metadata": {},
"outputs": [],
@@ -673,7 +496,7 @@
},
{
"cell_type": "code",
"execution_count": 16,
"execution_count": 12,
"id": "b4d22022-b105-4ccc-a15b-412cb9ea3097",
"metadata": {},
"outputs": [
@@ -683,7 +506,7 @@
"'Error: There is no city by the name of foobar.'"
]
},
"execution_count": 16,
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
@@ -707,7 +530,7 @@
},
{
"cell_type": "code",
"execution_count": 17,
"execution_count": 13,
"id": "3fad1728-d367-4e1b-9b54-3172981271cf",
"metadata": {},
"outputs": [
@@ -717,7 +540,7 @@
"\"There is no such city, but it's probably above 0K there!\""
]
},
"execution_count": 17,
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
@@ -741,7 +564,7 @@
},
{
"cell_type": "code",
"execution_count": 18,
"execution_count": 14,
"id": "ebfe7c1f-318d-4e58-99e1-f31e69473c46",
"metadata": {},
"outputs": [
@@ -751,7 +574,7 @@
"'The following errors occurred during tool execution: `Error: There is no city by the name of foobar.`'"
]
},
"execution_count": 18,
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
@@ -786,7 +609,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
"version": "3.11.4"
},
"vscode": {
"interpreter": {

View File

@@ -23,12 +23,12 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install unstructured"
"%pip install \"unstructured[html]\""
]
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"id": "7d167ca3-c7c7-4ef0-b509-080629f0f482",
"metadata": {},
"outputs": [
@@ -36,14 +36,14 @@
"name": "stdout",
"output_type": "stream",
"text": [
"[Document(page_content='My First Heading\\n\\nMy first paragraph.', metadata={'source': '../../docs/integrations/document_loaders/example_data/fake-content.html'})]\n"
"[Document(page_content='My First Heading\\n\\nMy first paragraph.', metadata={'source': '../../../docs/integrations/document_loaders/example_data/fake-content.html'})]\n"
]
}
],
"source": [
"from langchain_community.document_loaders import UnstructuredHTMLLoader\n",
"\n",
"file_path = \"../../docs/integrations/document_loaders/example_data/fake-content.html\"\n",
"file_path = \"../../../docs/integrations/document_loaders/example_data/fake-content.html\"\n",
"\n",
"loader = UnstructuredHTMLLoader(file_path)\n",
"data = loader.load()\n",
@@ -73,7 +73,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 2,
"id": "0a2050a8-6df6-4696-9889-ba367d6f9caa",
"metadata": {},
"outputs": [
@@ -81,7 +81,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"[Document(page_content='\\nTest Title\\n\\n\\nMy First Heading\\nMy first paragraph.\\n\\n\\n', metadata={'source': '../../docs/integrations/document_loaders/example_data/fake-content.html', 'title': 'Test Title'})]\n"
"[Document(page_content='\\nTest Title\\n\\n\\nMy First Heading\\nMy first paragraph.\\n\\n\\n', metadata={'source': '../../../docs/integrations/document_loaders/example_data/fake-content.html', 'title': 'Test Title'})]\n"
]
}
],
@@ -111,7 +111,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.10.4"
}
},
"nbformat": 4,

View File

@@ -21,12 +21,12 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 19,
"id": "c8b147fb-6877-4f7a-b2ee-ee971c7bc662",
"metadata": {},
"outputs": [],
"source": [
"%pip install \"unstructured[md]\""
"# !pip install \"unstructured[md]\""
]
},
{
@@ -39,7 +39,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 1,
"id": "80c50cc4-7ce9-4418-81b9-29c52c7b3627",
"metadata": {},
"outputs": [
@@ -62,7 +62,7 @@
"from langchain_community.document_loaders import UnstructuredMarkdownLoader\n",
"from langchain_core.documents import Document\n",
"\n",
"markdown_path = \"../../../README.md\"\n",
"markdown_path = \"../../../../README.md\"\n",
"loader = UnstructuredMarkdownLoader(markdown_path)\n",
"\n",
"data = loader.load()\n",
@@ -84,7 +84,7 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 2,
"id": "a986bbce-7fd3-41d1-bc47-49f9f57c7cd1",
"metadata": {},
"outputs": [
@@ -92,11 +92,11 @@
"name": "stdout",
"output_type": "stream",
"text": [
"Number of documents: 66\n",
"Number of documents: 65\n",
"\n",
"page_content='🦜️🔗 LangChain' metadata={'source': '../../../README.md', 'category_depth': 0, 'last_modified': '2024-06-28T15:20:01', 'languages': ['eng'], 'filetype': 'text/markdown', 'file_directory': '../../..', 'filename': 'README.md', 'category': 'Title'}\n",
"page_content='🦜️🔗 LangChain' metadata={'source': '../../../../README.md', 'last_modified': '2024-04-29T13:40:19', 'page_number': 1, 'languages': ['eng'], 'filetype': 'text/markdown', 'file_directory': '../../../..', 'filename': 'README.md', 'category': 'Title'}\n",
"\n",
"page_content='⚡ Build context-aware reasoning applications ⚡' metadata={'source': '../../../README.md', 'last_modified': '2024-06-28T15:20:01', 'languages': ['eng'], 'parent_id': '200b8a7d0dd03f66e4f13456566d2b3a', 'filetype': 'text/markdown', 'file_directory': '../../..', 'filename': 'README.md', 'category': 'NarrativeText'}\n",
"page_content='⚡ Build context-aware reasoning applications ⚡' metadata={'source': '../../../../README.md', 'last_modified': '2024-04-29T13:40:19', 'page_number': 1, 'languages': ['eng'], 'parent_id': 'c3223b6f7100be08a78f1e8c0c28fde1', 'filetype': 'text/markdown', 'file_directory': '../../../..', 'filename': 'README.md', 'category': 'NarrativeText'}\n",
"\n"
]
}
@@ -121,7 +121,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 3,
"id": "75abc139-3ded-4e8e-9f21-d0c8ec40fdfc",
"metadata": {},
"outputs": [
@@ -129,21 +129,13 @@
"name": "stdout",
"output_type": "stream",
"text": [
"{'ListItem', 'NarrativeText', 'Title'}\n"
"{'Title', 'NarrativeText', 'ListItem'}\n"
]
}
],
"source": [
"print(set(document.metadata[\"category\"] for document in data))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "223b4c11",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
@@ -162,7 +154,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.10.4"
}
},
"nbformat": 4,

File diff suppressed because one or more lines are too long

View File

@@ -58,8 +58,6 @@
}
],
"source": [
"from operator import itemgetter\n",
"\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import Runnable, RunnablePassthrough, chain\n",
@@ -88,7 +86,7 @@
" # NOTE: This is returning another Runnable, not an actual output.\n",
" return contextualize_question\n",
" else:\n",
" return RunnablePassthrough() | itemgetter(\"question\")\n",
" return RunnablePassthrough()\n",
"\n",
"\n",
"@chain\n",

View File

@@ -246,11 +246,11 @@
"examples = [\n",
" (\n",
" \"The ocean is vast and blue. It's more than 20,000 feet deep. There are many fish in it.\",\n",
" Data(people=[]),\n",
" Person(name=None, height_in_meters=None, hair_color=None),\n",
" ),\n",
" (\n",
" \"Fiona traveled far from France to Spain.\",\n",
" Data(people=[Person(name=\"Fiona\", height_in_meters=None, hair_color=None)]),\n",
" Person(name=\"Fiona\", height_in_meters=None, hair_color=None),\n",
" ),\n",
"]\n",
"\n",

View File

@@ -23,7 +23,7 @@
"- [Prompt templates](/docs/concepts/#prompt-templates)\n",
"- [Example selectors](/docs/concepts/#example-selectors)\n",
"- [LLMs](/docs/concepts/#llms)\n",
"- [Vectorstores](/docs/concepts/#vector-stores)\n",
"- [Vectorstores](/docs/concepts/#vectorstores)\n",
"\n",
":::\n",
"\n",

View File

@@ -23,7 +23,7 @@
"- [Prompt templates](/docs/concepts/#prompt-templates)\n",
"- [Example selectors](/docs/concepts/#example-selectors)\n",
"- [Chat models](/docs/concepts/#chat-model)\n",
"- [Vectorstores](/docs/concepts/#vector-stores)\n",
"- [Vectorstores](/docs/concepts/#vectorstores)\n",
"\n",
":::\n",
"\n",
@@ -51,7 +51,7 @@
"- `examples`: A list of dictionary examples to include in the final prompt.\n",
"- `example_prompt`: converts each example into 1 or more messages through its [`format_messages`](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html?highlight=format_messages#langchain_core.prompts.chat.ChatPromptTemplate.format_messages) method. A common example would be to convert each example into one human message and one AI message response, or a human message followed by a function call message.\n",
"\n",
"Below is a simple demonstration. First, define the examples you'd like to include. Let's give the LLM an unfamiliar mathematical operator, denoted by the \"🦜\" emoji:"
"Below is a simple demonstration. First, define the examples you'd like to include:"
]
},
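A minimal sketch of how the two pieces described above fit together (the example values are taken from the cells that follow; the prompt wiring itself is assumed, since the full cell is not visible in this diff):

```python
from langchain_core.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate

# Each example dict is rendered into a human/AI message pair by example_prompt.
example_prompt = ChatPromptTemplate.from_messages(
    [("human", "{input}"), ("ai", "{output}")]
)
few_shot_prompt = FewShotChatMessagePromptTemplate(
    example_prompt=example_prompt,
    examples=[{"input": "2 🦜 2", "output": "4"}, {"input": "2 🦜 3", "output": "5"}],
)
print(few_shot_prompt.format_messages())
```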
{
@@ -59,7 +59,17 @@
"execution_count": 1,
"id": "5b79e400",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[33mWARNING: You are using pip version 22.0.4; however, version 24.0 is available.\n",
"You should consider upgrading via the '/Users/jacoblee/.pyenv/versions/3.10.5/bin/python -m pip install --upgrade pip' command.\u001b[0m\u001b[33m\n",
"\u001b[0mNote: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"%pip install -qU langchain langchain-openai langchain-chroma\n",
"\n",
@@ -69,50 +79,9 @@
"os.environ[\"OPENAI_API_KEY\"] = getpass()"
]
},
{
"cell_type": "markdown",
"id": "30856d92",
"metadata": {},
"source": [
"If we try to ask the model what the result of this expression is, it will fail:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "174dec5b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='The expression \"2 🦜 9\" is not a standard mathematical operation or equation. It appears to be a combination of the number 2 and the parrot emoji 🦜 followed by the number 9. It does not have a specific mathematical meaning.', response_metadata={'token_usage': {'completion_tokens': 54, 'prompt_tokens': 17, 'total_tokens': 71}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-aad12dda-5c47-4a1e-9949-6fe94e03242a-0', usage_metadata={'input_tokens': 17, 'output_tokens': 54, 'total_tokens': 71})"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0.0)\n",
"\n",
"model.invoke(\"What is 2 🦜 9?\")"
]
},
{
"cell_type": "markdown",
"id": "e6d58385",
"metadata": {},
"source": [
"Now let's see what happens if we give the LLM some examples to work with. We'll define some below:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 2,
"id": "0fc5a02a-6249-4e92-95c3-30fff9671e8b",
"metadata": {
"tags": []
@@ -122,8 +91,8 @@
"from langchain_core.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate\n",
"\n",
"examples = [\n",
" {\"input\": \"2 🦜 2\", \"output\": \"4\"},\n",
" {\"input\": \"2 🦜 3\", \"output\": \"5\"},\n",
" {\"input\": \"2+2\", \"output\": \"4\"},\n",
" {\"input\": \"2+3\", \"output\": \"5\"},\n",
"]"
]
},
@@ -137,7 +106,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 3,
"id": "65e72ad1-9060-47d0-91a1-bc130c8b98ac",
"metadata": {
"tags": []
@@ -147,7 +116,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"[HumanMessage(content='2 🦜 2'), AIMessage(content='4'), HumanMessage(content='2 🦜 3'), AIMessage(content='5')]\n"
"[HumanMessage(content='2+2'), AIMessage(content='4'), HumanMessage(content='2+3'), AIMessage(content='5')]\n"
]
}
],
@@ -177,7 +146,7 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 4,
"id": "9f86d6d9-50de-41b6-b6c7-0f9980cc0187",
"metadata": {
"tags": []
@@ -193,17 +162,9 @@
")"
]
},
{
"cell_type": "markdown",
"id": "dd8029c5",
"metadata": {},
"source": [
"And now let's ask the model the initial question and see how it does:"
]
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 5,
"id": "97d443b1-6fae-4b36-bede-3ff7306288a3",
"metadata": {
"tags": []
@@ -212,10 +173,10 @@
{
"data": {
"text/plain": [
"AIMessage(content='11', response_metadata={'token_usage': {'completion_tokens': 1, 'prompt_tokens': 60, 'total_tokens': 61}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5ec4e051-262f-408e-ad00-3f2ebeb561c3-0', usage_metadata={'input_tokens': 60, 'output_tokens': 1, 'total_tokens': 61})"
"AIMessage(content='A triangle does not have a square. The square of a number is the result of multiplying the number by itself.', response_metadata={'token_usage': {'completion_tokens': 23, 'prompt_tokens': 52, 'total_tokens': 75}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': 'fp_c2295e73ad', 'finish_reason': 'stop', 'logprobs': None}, id='run-3456c4ef-7b4d-4adb-9e02-8079de82a47a-0')"
]
},
"execution_count": 8,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
@@ -223,9 +184,9 @@
"source": [
"from langchain_openai import ChatOpenAI\n",
"\n",
"chain = final_prompt | model\n",
"chain = final_prompt | ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0.0)\n",
"\n",
"chain.invoke({\"input\": \"What is 2 🦜 9?\"})"
"chain.invoke({\"input\": \"What's the square of a triangle?\"})"
]
},
{
@@ -233,8 +194,6 @@
"id": "70ab7114-f07f-46be-8874-3705a25aba5f",
"metadata": {},
"source": [
"And we can see that the model has now inferred that the parrot emoji means addition from the given few-shot examples!\n",
"\n",
"## Dynamic few-shot prompting\n",
"\n",
"Sometimes you may want to select only a few examples from your overall set to show based on the input. For this, you can replace the `examples` passed into `FewShotChatMessagePromptTemplate` with an `example_selector`. The other components remain the same as above! Our dynamic few-shot prompt template would look like:\n",
@@ -249,7 +208,7 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 6,
"id": "ad66f06a-66fd-4fcc-8166-5d0e3c801e57",
"metadata": {
"tags": []
@@ -261,9 +220,9 @@
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"examples = [\n",
" {\"input\": \"2 🦜 2\", \"output\": \"4\"},\n",
" {\"input\": \"2 🦜 3\", \"output\": \"5\"},\n",
" {\"input\": \"2 🦜 4\", \"output\": \"6\"},\n",
" {\"input\": \"2+2\", \"output\": \"4\"},\n",
" {\"input\": \"2+3\", \"output\": \"5\"},\n",
" {\"input\": \"2+4\", \"output\": \"6\"},\n",
" {\"input\": \"What did the cow say to the moon?\", \"output\": \"nothing at all\"},\n",
" {\n",
" \"input\": \"Write me a poem about the moon\",\n",
@@ -288,7 +247,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 7,
"id": "7790303a-f722-452e-8921-b14bdf20bdff",
"metadata": {
"tags": []
@@ -298,10 +257,10 @@
"data": {
"text/plain": [
"[{'input': 'What did the cow say to the moon?', 'output': 'nothing at all'},\n",
" {'input': '2 🦜 4', 'output': '6'}]"
" {'input': '2+4', 'output': '6'}]"
]
},
"execution_count": 10,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
@@ -328,7 +287,7 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 14,
"id": "253c255e-41d7-45f6-9d88-c7a0ced4b1bd",
"metadata": {
"tags": []
@@ -338,7 +297,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"[HumanMessage(content='2 🦜 3'), AIMessage(content='5'), HumanMessage(content='2 🦜 4'), AIMessage(content='6')]\n"
"[HumanMessage(content='2+3'), AIMessage(content='5'), HumanMessage(content='2+2'), AIMessage(content='4')]\n"
]
}
],
@@ -358,7 +317,7 @@
" ),\n",
")\n",
"\n",
"print(few_shot_prompt.invoke(input=\"What's 3 🦜 3?\").to_messages())"
"print(few_shot_prompt.invoke(input=\"What's 3+3?\").to_messages())"
]
},
{
@@ -371,7 +330,7 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 17,
"id": "e731cb45-f0ea-422c-be37-42af2a6cb2c4",
"metadata": {
"tags": []
@@ -381,7 +340,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"messages=[HumanMessage(content='2 🦜 3'), AIMessage(content='5'), HumanMessage(content='2 🦜 4'), AIMessage(content='6')]\n"
"messages=[HumanMessage(content='2+3'), AIMessage(content='5'), HumanMessage(content='2+2'), AIMessage(content='4')]\n"
]
}
],
@@ -394,7 +353,7 @@
" ]\n",
")\n",
"\n",
"print(few_shot_prompt.invoke(input=\"What's 3 🦜 3?\"))"
"print(few_shot_prompt.invoke(input=\"What's 3+3?\"))"
]
},
{
@@ -409,7 +368,7 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 18,
"id": "0568cbc6-5354-47f1-ab4d-dfcc616cf583",
"metadata": {
"tags": []
@@ -418,10 +377,10 @@
{
"data": {
"text/plain": [
"AIMessage(content='6', response_metadata={'token_usage': {'completion_tokens': 1, 'prompt_tokens': 60, 'total_tokens': 61}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-d1863e5e-17cd-4e9d-bf7a-b9f118747a65-0', usage_metadata={'input_tokens': 60, 'output_tokens': 1, 'total_tokens': 61})"
"AIMessage(content='6', response_metadata={'token_usage': {'completion_tokens': 1, 'prompt_tokens': 51, 'total_tokens': 52}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': 'fp_c2295e73ad', 'finish_reason': 'stop', 'logprobs': None}, id='run-6bcbe158-a8e3-4a85-a754-1ba274a9f147-0')"
]
},
"execution_count": 13,
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
@@ -429,7 +388,7 @@
"source": [
"chain = final_prompt | ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0.0)\n",
"\n",
"chain.invoke({\"input\": \"What's 3 🦜 3?\"})"
"chain.invoke({\"input\": \"What's 3+3?\"})"
]
},
{
@@ -469,7 +428,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.9.1"
}
},
"nbformat": 4,

View File

@@ -43,7 +43,6 @@ This highlights functionality that is core to using LangChain.
- [How to: create a dynamic (self-constructing) chain](/docs/how_to/dynamic_chain/)
- [How to: inspect runnables](/docs/how_to/inspect)
- [How to: add fallbacks to a runnable](/docs/how_to/fallbacks)
- [How to: migrate chains to LCEL](/docs/how_to/migrate_chains)
## Components
@@ -85,7 +84,7 @@ These are the core building blocks you can use when building applications.
- [How to: stream tool calls](/docs/how_to/tool_streaming)
- [How to: few shot prompt tool behavior](/docs/how_to/tools_few_shot)
- [How to: bind model-specific formated tools](/docs/how_to/tools_model_specific)
- [How to: force a specific tool call](/docs/how_to/tool_choice)
- [How to: force specific tool call](/docs/how_to/tool_choice)
- [How to: init any model in one line](/docs/how_to/chat_models_universal_init/)
### Messages
@@ -183,11 +182,10 @@ Indexing is the process of keeping your vectorstore in-sync with the underlying
### Tools
LangChain [Tools](/docs/concepts/#tools) contain a description of the tool (to pass to the language model) as well as the implementation of the function to call. Refer [here](/docs/integrations/tools/) for a list of pre-built tools.
LangChain [Tools](/docs/concepts/#tools) contain a description of the tool (to pass to the language model) as well as the implementation of the function to call.
- [How to: create custom tools](/docs/how_to/custom_tools)
- [How to: use built-in tools and built-in toolkits](/docs/how_to/tools_builtin)
- [How to: convert Runnables to tools](/docs/how_to/convert_runnable_to_tool)
- [How to: use chat model to call tools](/docs/how_to/tool_calling)
- [How to: pass tool results back to model](/docs/how_to/tool_results_pass_to_model)
- [How to: add ad-hoc tool calling capability to LLMs and chat models](/docs/how_to/tools_prompting)
@@ -195,7 +193,6 @@ LangChain [Tools](/docs/concepts/#tools) contain a description of the tool (to p
- [How to: add a human in the loop to tool usage](/docs/how_to/tools_human)
- [How to: handle errors when calling tools](/docs/how_to/tools_error)
- [How to: disable parallel tool calling](/docs/how_to/tool_choice)
- [How to: stream events from within a tool](/docs/how_to/tool_stream_events)
### Multimodal
@@ -207,7 +204,7 @@ LangChain [Tools](/docs/concepts/#tools) contain a description of the tool (to p
:::note
For in depth how-to guides for agents, please check out [LangGraph](https://langchain-ai.github.io/langgraph/) documentation.
For in depth how-to guides for agents, please check out [LangGraph](https://github.com/langchain-ai/langgraph) documentation.
:::
@@ -320,8 +317,7 @@ LangSmith allows you to closely trace, monitor and evaluate your LLM application
It seamlessly integrates with LangChain and LangGraph, and you can use it to inspect and debug individual steps of your chains and agents as you build.
LangSmith documentation is hosted on a separate site.
You can peruse [LangSmith how-to guides here](https://docs.smith.langchain.com/how_to_guides/), but we'll highlight a few sections that are particularly
relevant to LangChain below:
You can peruse [LangSmith how-to guides here](https://docs.smith.langchain.com/how_to_guides/).
### Evaluation
<span data-heading-keywords="evaluation,evaluate"></span>
@@ -330,13 +326,3 @@ Evaluating performance is a vital part of building LLM-powered applications.
LangSmith helps with every step of the process from creating a dataset to defining metrics to running evaluators.
To learn more, check out the [LangSmith evaluation how-to guides](https://docs.smith.langchain.com/how_to_guides#evaluation).
### Tracing
<span data-heading-keywords="trace,tracing"></span>
Tracing gives you observability inside your chains and agents, and is vital in diagnosing issues.
- [How to: trace with LangChain](https://docs.smith.langchain.com/how_to_guides/tracing/trace_with_langchain)
- [How to: add metadata and tags to traces](https://docs.smith.langchain.com/how_to_guides/tracing/trace_with_langchain#add-metadata-and-tags-to-traces)
You can see general tracing-related how-tos [in this section of the LangSmith docs](https://docs.smith.langchain.com/how_to_guides/tracing).

View File

@@ -60,7 +60,7 @@
" * document addition by id (`add_documents` method with `ids` argument)\n",
" * delete by id (`delete` method with `ids` argument)\n",
"\n",
"Compatible Vectorstores: `Aerospike`, `AnalyticDB`, `AstraDB`, `AwaDB`, `AzureCosmosDBNoSqlVectorSearch`, `AzureCosmosDBVectorSearch`, `Bagel`, `Cassandra`, `Chroma`, `CouchbaseVectorStore`, `DashVector`, `DatabricksVectorSearch`, `DeepLake`, `Dingo`, `ElasticVectorSearch`, `ElasticsearchStore`, `FAISS`, `HanaDB`, `Milvus`, `MyScale`, `OpenSearchVectorSearch`, `PGVector`, `Pinecone`, `Qdrant`, `Redis`, `Rockset`, `ScaNN`, `SingleStoreDB`, `SupabaseVectorStore`, `SurrealDBStore`, `TimescaleVector`, `Vald`, `VDMS`, `Vearch`, `VespaStore`, `Weaviate`, `Yellowbrick`, `ZepVectorStore`, `TencentVectorDB`, `OpenSearchVectorSearch`.\n",
"Compatible Vectorstores: `Aerospike`, `AnalyticDB`, `AstraDB`, `AwaDB`, `AzureCosmosDBNoSqlVectorSearch`, `AzureCosmosDBVectorSearch`, `Bagel`, `Cassandra`, `Chroma`, `CouchbaseVectorStore`, `DashVector`, `DatabricksVectorSearch`, `DeepLake`, `Dingo`, `ElasticVectorSearch`, `ElasticsearchStore`, `FAISS`, `HanaDB`, `Milvus`, `MyScale`, `OpenSearchVectorSearch`, `PGVector`, `Pinecone`, `Qdrant`, `Redis`, `Rockset`, `ScaNN`, `SupabaseVectorStore`, `SurrealDBStore`, `TimescaleVector`, `Vald`, `VDMS`, `Vearch`, `VespaStore`, `Weaviate`, `Yellowbrick`, `ZepVectorStore`, `TencentVectorDB`, `OpenSearchVectorSearch`.\n",
" \n",
"## Caution\n",
"\n",

View File

@@ -2,14 +2,11 @@
sidebar_position: 2
---
# How to install LangChain packages
The LangChain ecosystem is split into different packages, which allow you to choose exactly which pieces of
functionality to install.
# Installation
## Official release
To install the main LangChain package, run:
To install LangChain run:
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
@@ -24,24 +21,11 @@ import CodeBlock from "@theme/CodeBlock";
</TabItem>
</Tabs>
While this package acts as a sane starting point for using LangChain,
much of the value of LangChain comes when integrating it with various model providers, datastores, etc.
This will install the bare minimum requirements of LangChain.
A lot of the value of LangChain comes when integrating it with various model providers, datastores, etc.
By default, the dependencies needed to do that are NOT installed. You will need to install the dependencies for specific integrations separately.
We'll show how to do that in the next sections of this guide.
## Ecosystem packages
With the exception of the `langsmith` SDK, all packages in the LangChain ecosystem depend on `langchain-core`, which contains base
classes and abstractions that other packages use. The dependency graph below shows how the different packages are related.
A directed arrow indicates that the source package depends on the target package:
![](/img/ecosystem_packages.png)
When installing a package, you do not need to explicitly install that package's own dependencies (such as `langchain-core`).
However, you may choose to if you are using a feature only available in a certain version of that dependency.
If you do, you should make sure that the installed or pinned version is compatible with any other integration packages you use.
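
If you do pin versions, a quick way to see what is currently installed is a small Python check (a minimal sketch using only the standard library; adjust the package names to the ones you actually use):

```python
# List installed LangChain ecosystem packages and their versions.
from importlib import metadata

for pkg in ("langchain", "langchain-core", "langchain-community", "langchain-openai"):
    try:
        print(f"{pkg}=={metadata.version(pkg)}")
    except metadata.PackageNotFoundError:
        print(f"{pkg} is not installed")
```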
### From source
## From source
If you want to install from source, clone the repo, make sure your working directory is `PATH/TO/REPO/langchain/libs/langchain`, and run:
@@ -49,21 +33,21 @@ If you want to install from source, you can do so by cloning the repo and be sur
pip install -e .
```
### LangChain core
## LangChain core
The `langchain-core` package contains base abstractions that the rest of the LangChain ecosystem uses, along with the LangChain Expression Language. It is automatically installed by `langchain`, but can also be used separately. Install with:
```bash
pip install langchain-core
```
### LangChain community
## LangChain community
The `langchain-community` package contains third-party integrations. Install with:
```bash
pip install langchain-community
```
### LangChain experimental
## LangChain experimental
The `langchain-experimental` package holds experimental LangChain code, intended for research and experimental uses.
Install with:
@@ -71,15 +55,14 @@ Install with:
pip install langchain-experimental
```
### LangGraph
`langgraph` is a library for building stateful, multi-actor applications with LLMs. It integrates smoothly with LangChain, but can be used without it.
## LangGraph
`langgraph` is a library for building stateful, multi-actor applications with LLMs, built on top of (and intended to be used with) LangChain.
Install with:
```bash
pip install langgraph
```
### LangServe
## LangServe
LangServe helps developers deploy LangChain runnables and chains as a REST API.
LangServe is automatically installed by LangChain CLI.
If not using LangChain CLI, install with:
@@ -97,10 +80,9 @@ Install with:
pip install langchain-cli
```
### LangSmith SDK
The LangSmith SDK is automatically installed by LangChain. However, it does not depend on
`langchain-core`, and can be installed and used independently if desired.
If you are not using LangChain, you can install it with:
## LangSmith SDK
The LangSmith SDK is automatically installed by LangChain.
If not using LangChain, install with:
```bash
pip install langsmith

View File

@@ -1,19 +1,5 @@
{
"cells": [
{
"cell_type": "raw",
"id": "adc7ee09",
"metadata": {
"vscode": {
"languageId": "raw"
}
},
"source": [
"---\n",
"keywords: [create_react_agent, create_react_agent()]\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "457cdc67-1893-4653-8b0c-b185a5947e74",
@@ -21,18 +7,9 @@
"source": [
"# How to migrate from legacy LangChain agents to LangGraph\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"- [Agents](/docs/concepts/#agents)\n",
"- [LangGraph](https://langchain-ai.github.io/langgraph/)\n",
"- [Tool calling](/docs/how_to/tool_calling/)\n",
"\n",
":::\n",
"\n",
"Here we focus on how to move from legacy LangChain agents to more flexible [LangGraph](https://langchain-ai.github.io/langgraph/) agents.\n",
"Here we focus on how to move from legacy LangChain agents to LangGraph agents.\n",
"LangChain agents (the [AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor) in particular) have multiple configuration parameters.\n",
"In this notebook we will show how those parameters map to the LangGraph react agent executor using the [create_react_agent](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent) prebuilt helper method.\n",
"In this notebook we will show how those parameters map to the LangGraph [react agent executor](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent).\n",
"\n",
"#### Prerequisites\n",
"\n",
@@ -218,7 +195,7 @@
"\n",
"Let's take a look at all of these below. We will pass in custom instructions to get the agent to respond in Spanish.\n",
"\n",
"First up, using `AgentExecutor`:"
"First up, using AgentExecutor:"
]
},
{
@@ -261,16 +238,7 @@
"id": "bd5f5500-5ae4-4000-a9fd-8c5a2cc6404d",
"metadata": {},
"source": [
"Now, let's pass a custom system message to [react agent executor](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent).\n",
"\n",
"LangGraph's prebuilt `create_react_agent` does not take a prompt template directly as a parameter, but instead takes a [`messages_modifier`](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent) parameter. This modifies messages before they are passed into the model, and can be one of four values:\n",
"\n",
"- A `SystemMessage`, which is added to the beginning of the list of messages.\n",
"- A `string`, which is converted to a `SystemMessage` and added to the beginning of the list of messages.\n",
"- A `Callable`, which should take in a list of messages. The output is then passed to the language model.\n",
"- Or a [`Runnable`](/docs/concepts/#langchain-expression-language-lcel), which should should take in a list of messages. The output is then passed to the language model.\n",
"\n",
"Here's how it looks in action:"
"Now, let's pass a custom system message to [react agent executor](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent). This can either be a string or a LangChain SystemMessage."
]
},
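{
"cell_type": "markdown",
"id": "messages-modifier-sketch",
"metadata": {},
"source": [
"For illustration, here is a minimal sketch of the first two options (it assumes the `model` and `tools` defined earlier in this notebook):\n",
"\n",
"```python\n",
"# Sketch only: pass a system prompt via messages_modifier.\n",
"from langchain_core.messages import SystemMessage\n",
"from langgraph.prebuilt import create_react_agent\n",
"\n",
"# Option 1: a plain string is converted to a SystemMessage for you\n",
"app = create_react_agent(model, tools, messages_modifier=\"Respond only in Spanish.\")\n",
"\n",
"# Option 2: pass a SystemMessage explicitly\n",
"app = create_react_agent(\n",
"    model, tools, messages_modifier=SystemMessage(content=\"Respond only in Spanish.\")\n",
")\n",
"```"
]
},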
{
@@ -351,15 +319,7 @@
"id": "68df3a09",
"metadata": {},
"source": [
"## Memory"
]
},
{
"cell_type": "markdown",
"id": "96e7ffc8",
"metadata": {},
"source": [
"### In LangChain\n",
"## Memory\n",
"\n",
"With LangChain's [AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter), you could add chat [Memory](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.memory) so it can engage in a multi-turn conversation."
]
@@ -447,7 +407,7 @@
"id": "c2a5a32f",
"metadata": {},
"source": [
"### In LangGraph\n",
"#### In LangGraph\n",
"\n",
"Memory is just [persistence](https://langchain-ai.github.io/langgraph/how-tos/persistence/), aka [checkpointing](https://langchain-ai.github.io/langgraph/reference/checkpoints/).\n",
"\n",
@@ -518,8 +478,6 @@
"source": [
"## Iterating through steps\n",
"\n",
"### In LangChain\n",
"\n",
"With LangChain's [AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter), you could iterate over the steps using the [stream](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.stream) (or async `astream`) methods or the [iter](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter) method. LangGraph supports stepwise iteration using [stream](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.stream) "
]
},
@@ -578,7 +536,7 @@
"id": "46ccbcbf",
"metadata": {},
"source": [
"### In LangGraph\n",
"#### In LangGraph\n",
"\n",
"In LangGraph, things are handled natively using [stream](https://langchain-ai.github.io/langgraph/reference/graphs/#langgraph.graph.graph.CompiledGraph.stream) or the asynchronous `astream` method."
]
@@ -629,8 +587,6 @@
"source": [
"## `return_intermediate_steps`\n",
"\n",
"### In LangChain\n",
"\n",
"Setting this parameter on AgentExecutor allows users to access intermediate_steps, which pairs agent actions (e.g., tool invocations) with their outcomes.\n"
]
},
@@ -659,8 +615,6 @@
"id": "594f7567-302f-4fa8-85bb-025ac8322162",
"metadata": {},
"source": [
"### In LangGraph\n",
"\n",
"By default the [react agent executor](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent) in LangGraph appends all messages to the central state. Therefore, it is easy to see any intermediate steps by just looking at the full state."
]
},
@@ -701,9 +655,11 @@
"source": [
"## `max_iterations`\n",
"\n",
"### In LangChain\n",
"`AgentExecutor` implements a `max_iterations` parameter, whereas this is controlled via `recursion_limit` in LangGraph.\n",
"\n",
"`AgentExecutor` implements a `max_iterations` parameter, allowing users to abort a run that exceeds a specified number of iterations."
"Note that in AgentExecutor, an \"iteration\" includes a full turn of tool invocation and execution. In LangGraph, each step contributes to the recursion limit, so we will need to multiply by two (and add one) to get equivalent results.\n",
"\n",
"If the recursion limit is reached, LangGraph raises a specific exception type, that we can catch and manage similarly to AgentExecutor."
]
},
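{
"cell_type": "markdown",
"id": "recursion-limit-sketch",
"metadata": {},
"source": [
"A rough sketch of the equivalence (assuming the `app` and `query` defined earlier; the exception import path may vary by `langgraph` version):\n",
"\n",
"```python\n",
"# Sketch: AgentExecutor(max_iterations=3) roughly corresponds to\n",
"# recursion_limit = 2 * 3 + 1 in LangGraph.\n",
"from langgraph.errors import GraphRecursionError\n",
"\n",
"RECURSION_LIMIT = 2 * 3 + 1\n",
"\n",
"try:\n",
"    app.invoke(\n",
"        {\"messages\": [(\"human\", query)]},\n",
"        {\"recursion_limit\": RECURSION_LIMIT},\n",
"    )\n",
"except GraphRecursionError:\n",
"    print(\"Agent stopped due to max iterations.\")\n",
"```"
]
},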
{
@@ -781,20 +737,6 @@
"agent_executor.invoke({\"input\": query})"
]
},
{
"cell_type": "markdown",
"id": "dd3a933f",
"metadata": {},
"source": [
"### In LangGraph\n",
"\n",
"In LangGraph this is controlled via `recursion_limit` configuration parameter.\n",
"\n",
"Note that in `AgentExecutor`, an \"iteration\" includes a full turn of tool invocation and execution. In LangGraph, each step contributes to the recursion limit, so we will need to multiply by two (and add one) to get equivalent results.\n",
"\n",
"If the recursion limit is reached, LangGraph raises a specific exception type, that we can catch and manage similarly to AgentExecutor."
]
},
{
"cell_type": "code",
"execution_count": 16,
@@ -840,8 +782,6 @@
"source": [
"## `max_execution_time`\n",
"\n",
"### In LangChain\n",
"\n",
"`AgentExecutor` implements a `max_execution_time` parameter, allowing users to abort a run that exceeds a total time limit."
]
},
@@ -908,8 +848,6 @@
"id": "d02eb025",
"metadata": {},
"source": [
"### In LangGraph\n",
"\n",
"With LangGraph's react agent, you can control timeouts on two levels. \n",
"\n",
"You can set a `step_timeout` to bound each **step**:"
@@ -998,8 +936,6 @@
"source": [
"## `early_stopping_method`\n",
"\n",
"### In LangChain\n",
"\n",
"With LangChain's [AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter), you could configure an [early_stopping_method](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.early_stopping_method) to either return a string saying \"Agent stopped due to iteration limit or time limit.\" (`\"force\"`) or prompt the LLM a final time to respond (`\"generate\"`)."
]
},
@@ -1060,7 +996,7 @@
"id": "706e05c4",
"metadata": {},
"source": [
"### In LangGraph\n",
"#### In LangGraph\n",
"\n",
"In LangGraph, you can explicitly handle the response behavior outside the agent, since the full state can be accessed."
]
@@ -1109,8 +1045,6 @@
"source": [
"## `trim_intermediate_steps`\n",
"\n",
"### In LangChain\n",
"\n",
"With LangChain's [AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor), you could trim the intermediate steps of long-running agents using [trim_intermediate_steps](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.trim_intermediate_steps), which is either an integer (indicating the agent should keep the last N steps) or a custom function.\n",
"\n",
"For instance, we could trim the value so the agent only sees the most recent intermediate step."
@@ -1214,7 +1148,7 @@
"id": "3d450c5a",
"metadata": {},
"source": [
"### In LangGraph\n",
"#### In LangGraph\n",
"\n",
"We can use the [`messages_modifier`](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent) just as before when passing in [prompt templates](#prompt-templates)."
]
@@ -1278,18 +1212,6 @@
"except GraphRecursionError as e:\n",
" print(\"Stopping agent prematurely due to triggering stop condition\")"
]
},
{
"cell_type": "markdown",
"id": "41377eb8",
"metadata": {},
"source": [
"## Next steps\n",
"\n",
"You've now learned how to migrate your LangChain agent executors to LangGraph.\n",
"\n",
"Next, check out other [LangGraph how-to guides](https://langchain-ai.github.io/langgraph/how-tos/)."
]
}
],
"metadata": {

View File

@@ -1,798 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "f331037f-be3f-4782-856f-d55dab952488",
"metadata": {},
"source": [
"# How to migrate chains to LCEL\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"- [LangChain Expression Language](/docs/concepts#langchain-expression-language-lcel)\n",
"\n",
":::\n",
"\n",
"LCEL is designed to streamline the process of building useful apps with LLMs and combining related components. It does this by providing:\n",
"\n",
"1. **A unified interface**: Every LCEL object implements the `Runnable` interface, which defines a common set of invocation methods (`invoke`, `batch`, `stream`, `ainvoke`, ...). This makes it possible to also automatically and consistently support useful operations like streaming of intermediate steps and batching, since every chain composed of LCEL objects is itself an LCEL object.\n",
"2. **Composition primitives**: LCEL provides a number of primitives that make it easy to compose chains, parallelize components, add fallbacks, dynamically configure chain internals, and more.\n",
"\n",
"LangChain maintains a number of legacy abstractions. Many of these can be reimplemented via short combinations of LCEL primitives. Doing so confers some general advantages:\n",
"\n",
"- The resulting chains typically implement the full `Runnable` interface, including streaming and asynchronous support where appropriate;\n",
"- The chains may be more easily extended or modified;\n",
"- The parameters of the chain are typically surfaced for easier customization (e.g., prompts) over previous versions, which tended to be subclasses and had opaque parameters and internals.\n",
"\n",
"The LCEL implementations can be slightly more verbose, but there are significant benefits in transparency and customizability.\n",
"\n",
"In this guide we review LCEL implementations of common legacy abstractions. Where appropriate, we link out to separate guides with more detail."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b99b47ec",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain-community langchain langchain-openai faiss-cpu"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "717c8673",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()"
]
},
{
"cell_type": "markdown",
"id": "e3621b62-a037-42b8-8faa-59575608bb8b",
"metadata": {},
"source": [
"## `LLMChain`\n",
"<span data-heading-keywords=\"llmchain\"></span>\n",
"\n",
"[`LLMChain`](https://api.python.langchain.com/en/latest/chains/langchain.chains.llm.LLMChain.html) combined a prompt template, LLM, and output parser into a class.\n",
"\n",
"Some advantages of switching to the LCEL implementation are:\n",
"\n",
"- Clarity around contents and parameters. The legacy `LLMChain` contains a default output parser and other options.\n",
"- Easier streaming. `LLMChain` only supports streaming via callbacks.\n",
"- Easier access to raw message outputs if desired. `LLMChain` only exposes these via a parameter or via callback.\n",
"\n",
"import { ColumnContainer, Column } from \"@theme/Columns\";\n",
"\n",
"<ColumnContainer>\n",
"\n",
"<Column>\n",
"\n",
"#### Legacy\n"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "e628905c-430e-4e4a-9d7c-c91d2f42052e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'adjective': 'funny',\n",
" 'text': \"Why couldn't the bicycle find its way home?\\n\\nBecause it lost its bearings!\"}"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain.chains import LLMChain\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [(\"user\", \"Tell me a {adjective} joke\")],\n",
")\n",
"\n",
"chain = LLMChain(llm=ChatOpenAI(), prompt=prompt)\n",
"\n",
"chain({\"adjective\": \"funny\"})"
]
},
{
"cell_type": "markdown",
"id": "cdc3b527-c09e-4c77-9711-c3cc4506cd95",
"metadata": {},
"source": [
"\n",
"</Column>\n",
"\n",
"<Column>\n",
"\n",
"#### LCEL\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "0d2a7cf8-1bc7-405c-bb0d-f2ab2ba3b6ab",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\"Why couldn't the bicycle stand up by itself?\\n\\nBecause it was two tired!\""
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [(\"user\", \"Tell me a {adjective} joke\")],\n",
")\n",
"\n",
"chain = prompt | ChatOpenAI() | StrOutputParser()\n",
"\n",
"chain.invoke({\"adjective\": \"funny\"})"
]
},
{
"cell_type": "markdown",
"id": "3c0b0513-77b8-4371-a20e-3e487cec7e7f",
"metadata": {},
"source": [
"\n",
"</Column>\n",
"</ColumnContainer>\n",
"\n",
"Note that `LLMChain` by default returns a `dict` containing both the input and the output. If this behavior is desired, we can replicate it using another LCEL primitive, [`RunnablePassthrough`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html):"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "529206c5-abbe-4213-9e6c-3b8586c8000d",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'adjective': 'funny',\n",
" 'text': \"Why couldn't the bicycle stand up by itself?\\n\\nBecause it was two tired!\"}"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"outer_chain = RunnablePassthrough().assign(text=chain)\n",
"\n",
"outer_chain.invoke({\"adjective\": \"funny\"})"
]
},
{
"cell_type": "markdown",
"id": "29d2e26c-2854-4971-9c2b-613450993921",
"metadata": {},
"source": [
"See [this tutorial](/docs/tutorials/llm_chain) for more detail on building with prompt templates, LLMs, and output parsers."
]
},
{
"cell_type": "markdown",
"id": "00df631d-5121-4918-94aa-b88acce9b769",
"metadata": {},
"source": [
"## `ConversationChain`\n",
"<span data-heading-keywords=\"conversationchain\"></span>\n",
"\n",
"[`ConversationChain`](https://api.python.langchain.com/en/latest/chains/langchain.chains.conversation.base.ConversationChain.html) incorporates a memory of previous messages to sustain a stateful conversation.\n",
"\n",
"Some advantages of switching to the LCEL implementation are:\n",
"\n",
"- Innate support for threads/separate sessions. To make this work with `ConversationChain`, you'd need to instantiate a separate memory class outside the chain.\n",
"- More explicit parameters. `ConversationChain` contains a hidden default prompt, which can cause confusion.\n",
"- Streaming support. `ConversationChain` only supports streaming via callbacks.\n",
"\n",
"`RunnableWithMessageHistory` implements sessions via configuration parameters. It should be instantiated with a callable that returns a [chat message history](https://api.python.langchain.com/en/latest/chat_history/langchain_core.chat_history.BaseChatMessageHistory.html). By default, it expects this function to take a single argument `session_id`.\n",
"\n",
"<ColumnContainer>\n",
"<Column>\n",
"\n",
"#### Legacy\n"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "4f2cc6dc-d70a-4c13-9258-452f14290da6",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'input': 'how are you?',\n",
" 'history': '',\n",
" 'response': \"Arrr, I be doin' well, me matey! Just sailin' the high seas in search of treasure and adventure. How can I assist ye today?\"}"
]
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain.chains import ConversationChain\n",
"from langchain.memory import ConversationBufferMemory\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"template = \"\"\"\n",
"You are a pirate. Answer the following questions as best you can.\n",
"Chat history: {history}\n",
"Question: {input}\n",
"\"\"\"\n",
"\n",
"prompt = ChatPromptTemplate.from_template(template)\n",
"\n",
"memory = ConversationBufferMemory()\n",
"\n",
"chain = ConversationChain(\n",
" llm=ChatOpenAI(),\n",
" memory=memory,\n",
" prompt=prompt,\n",
")\n",
"\n",
"chain({\"input\": \"how are you?\"})"
]
},
{
"cell_type": "markdown",
"id": "f8e36b0e-c7dc-4130-a51b-189d4b756c7f",
"metadata": {},
"source": [
"</Column>\n",
"\n",
"<Column>\n",
"\n",
"#### LCEL\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "173e1a9c-2a18-4669-b0de-136f39197786",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\"Arr, matey! I be sailin' the high seas with me crew, searchin' for buried treasure and adventure! How be ye doin' on this fine day?\""
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.chat_history import InMemoryChatMessageHistory\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables.history import RunnableWithMessageHistory\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\"system\", \"You are a pirate. Answer the following questions as best you can.\"),\n",
" (\"placeholder\", \"{chat_history}\"),\n",
" (\"human\", \"{input}\"),\n",
" ]\n",
")\n",
"\n",
"history = InMemoryChatMessageHistory()\n",
"\n",
"chain = prompt | ChatOpenAI() | StrOutputParser()\n",
"\n",
"wrapped_chain = RunnableWithMessageHistory(chain, lambda x: history)\n",
"\n",
"wrapped_chain.invoke(\n",
" {\"input\": \"how are you?\"},\n",
" config={\"configurable\": {\"session_id\": \"42\"}},\n",
")"
]
},
{
"cell_type": "markdown",
"id": "6b386ce6-895e-442c-88f3-7bec0ab9f401",
"metadata": {},
"source": [
"\n",
"</Column>\n",
"</ColumnContainer>\n",
"\n",
"The above example uses the same `history` for all sessions. The example below shows how to use a different chat history for each session."
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "4e05994f-1fbc-4699-bf2e-62cb0e4deeb8",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"Ahoy there! What be ye wantin' from this old pirate?\", response_metadata={'token_usage': {'completion_tokens': 15, 'prompt_tokens': 29, 'total_tokens': 44}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-1846d5f5-0dda-43b6-bb49-864e541f9c29-0', usage_metadata={'input_tokens': 29, 'output_tokens': 15, 'total_tokens': 44})"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.chat_history import BaseChatMessageHistory\n",
"from langchain_core.runnables.history import RunnableWithMessageHistory\n",
"\n",
"store = {}\n",
"\n",
"\n",
"def get_session_history(session_id: str) -> BaseChatMessageHistory:\n",
" if session_id not in store:\n",
" store[session_id] = InMemoryChatMessageHistory()\n",
" return store[session_id]\n",
"\n",
"\n",
"chain = prompt | ChatOpenAI() | StrOutputParser()\n",
"\n",
"wrapped_chain = RunnableWithMessageHistory(chain, get_session_history)\n",
"\n",
"wrapped_chain.invoke(\"Hello!\", config={\"configurable\": {\"session_id\": \"abc123\"}})"
]
},
{
"cell_type": "markdown",
"id": "c36ebecb",
"metadata": {},
"source": [
"See [this tutorial](/docs/tutorials/chatbot) for a more end-to-end guide on building with [`RunnableWithMessageHistory`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html).\n",
"\n",
"## `RetrievalQA`\n",
"<span data-heading-keywords=\"retrievalqa\"></span>\n",
"\n",
"The [`RetrievalQA`](https://api.python.langchain.com/en/latest/chains/langchain.chains.retrieval_qa.base.RetrievalQA.html) chain performed natural-language question answering over a data source using retrieval-augmented generation.\n",
"\n",
"Some advantages of switching to the LCEL implementation are:\n",
"\n",
"- Easier customizability. Details such as the prompt and how documents are formatted are only configurable via specific parameters in the `RetrievalQA` chain.\n",
"- More easily return source documents.\n",
"- Support for runnable methods like streaming and async operations.\n",
"\n",
"Now let's look at them side-by-side. We'll use the same ingestion code to load a [blog post by Lilian Weng](https://lilianweng.github.io/posts/2023-06-23-agent/) on autonomous agents into a local vector store:"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "1efbe16e",
"metadata": {},
"outputs": [],
"source": [
"# Load docs\n",
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"from langchain_community.document_loaders import WebBaseLoader\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_openai.chat_models import ChatOpenAI\n",
"from langchain_openai.embeddings import OpenAIEmbeddings\n",
"\n",
"loader = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")\n",
"data = loader.load()\n",
"\n",
"# Split\n",
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)\n",
"all_splits = text_splitter.split_documents(data)\n",
"\n",
"# Store splits\n",
"vectorstore = FAISS.from_documents(documents=all_splits, embedding=OpenAIEmbeddings())\n",
"\n",
"# LLM\n",
"llm = ChatOpenAI()"
]
},
{
"cell_type": "markdown",
"id": "c7e16438",
"metadata": {},
"source": [
"<ColumnContainer>\n",
"\n",
"<Column>\n",
"\n",
"#### Legacy"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "43bf55a0",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'query': 'What are autonomous agents?',\n",
" 'result': 'Autonomous agents are LLM-empowered agents that handle autonomous design, planning, and performance of complex tasks, such as scientific experiments. These agents can browse the Internet, read documentation, execute code, call robotics experimentation APIs, and leverage other LLMs. They are capable of reasoning and planning ahead for complicated tasks by breaking them down into smaller steps.'}"
]
},
"execution_count": 22,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain import hub\n",
"from langchain.chains import RetrievalQA\n",
"\n",
"# See full prompt at https://smith.langchain.com/hub/rlm/rag-prompt\n",
"prompt = hub.pull(\"rlm/rag-prompt\")\n",
"\n",
"qa_chain = RetrievalQA.from_llm(\n",
" llm, retriever=vectorstore.as_retriever(), prompt=prompt\n",
")\n",
"\n",
"qa_chain(\"What are autonomous agents?\")"
]
},
{
"cell_type": "markdown",
"id": "081948e5",
"metadata": {},
"source": [
"</Column>\n",
"\n",
"<Column>\n",
"\n",
"#### LCEL\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "9efcc931",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Autonomous agents are agents that can handle autonomous design, planning, and performance of complex tasks, such as scientific experiments. They can browse the Internet, read documentation, execute code, call robotics experimentation APIs, and leverage other language model models. These agents use reasoning steps to develop solutions to specific tasks, like creating a novel anticancer drug.'"
]
},
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain import hub\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"# See full prompt at https://smith.langchain.com/hub/rlm/rag-prompt\n",
"prompt = hub.pull(\"rlm/rag-prompt\")\n",
"\n",
"\n",
"def format_docs(docs):\n",
" return \"\\n\\n\".join(doc.page_content for doc in docs)\n",
"\n",
"\n",
"qa_chain = (\n",
" {\n",
" \"context\": vectorstore.as_retriever() | format_docs,\n",
" \"question\": RunnablePassthrough(),\n",
" }\n",
" | prompt\n",
" | llm\n",
" | StrOutputParser()\n",
")\n",
"\n",
"qa_chain.invoke(\"What are autonomous agents?\")"
]
},
{
"cell_type": "markdown",
"id": "d6f44fe8",
"metadata": {},
"source": [
"</Column>\n",
"</ColumnContainer>\n",
"\n",
"The LCEL implementation exposes the internals of what's happening around retrieving, formatting documents, and passing them through a prompt to the LLM, but it is more verbose. You can customize and wrap this composition logic in a helper function, or use the higher-level [`create_retrieval_chain`](https://api.python.langchain.com/en/latest/chains/langchain.chains.retrieval.create_retrieval_chain.html) and [`create_stuff_documents_chain`](https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.stuff.create_stuff_documents_chain.html) helper method:"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "5fe42761",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'input': 'What are autonomous agents?',\n",
" 'context': [Document(page_content='Boiko et al. (2023) also looked into LLM-empowered agents for scientific discovery, to handle autonomous design, planning, and performance of complex scientific experiments. This agent can use tools to browse the Internet, read documentation, execute code, call robotics experimentation APIs and leverage other LLMs.\\nFor example, when requested to \"develop a novel anticancer drug\", the model came up with the following reasoning steps:', metadata={'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'description': 'Building agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview In a LLM-powered autonomous agent system, LLM functions as the agents brain, complemented by several key components:', 'language': 'en'}),\n",
" Document(page_content='Weng, Lilian. (Jun 2023). “LLM-powered Autonomous Agents”. LilLog. https://lilianweng.github.io/posts/2023-06-23-agent/.', metadata={'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'description': 'Building agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview In a LLM-powered autonomous agent system, LLM functions as the agents brain, complemented by several key components:', 'language': 'en'}),\n",
" Document(page_content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#', metadata={'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'description': 'Building agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview In a LLM-powered autonomous agent system, LLM functions as the agents brain, complemented by several key components:', 'language': 'en'}),\n",
" Document(page_content=\"LLM Powered Autonomous Agents | Lil'Log\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nLil'Log\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nPosts\\n\\n\\n\\n\\nArchive\\n\\n\\n\\n\\nSearch\\n\\n\\n\\n\\nTags\\n\\n\\n\\n\\nFAQ\\n\\n\\n\\n\\nemojisearch.app\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n LLM Powered Autonomous Agents\\n \\nDate: June 23, 2023 | Estimated Reading Time: 31 min | Author: Lilian Weng\\n\\n\\n \\n\\n\\nTable of Contents\\n\\n\\n\\nAgent System Overview\\n\\nComponent One: Planning\\n\\nTask Decomposition\\n\\nSelf-Reflection\\n\\n\\nComponent Two: Memory\\n\\nTypes of Memory\\n\\nMaximum Inner Product Search (MIPS)\", metadata={'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'description': 'Building agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview In a LLM-powered autonomous agent system, LLM functions as the agents brain, complemented by several key components:', 'language': 'en'})],\n",
" 'answer': 'Autonomous agents are entities that can operate independently, making decisions and taking actions without direct human intervention. These agents can perform tasks such as planning, executing complex experiments, and leveraging various tools and resources to achieve objectives. In the context provided, LLM-powered autonomous agents are specifically designed for scientific discovery, capable of handling tasks like designing novel anticancer drugs through reasoning steps.'}"
]
},
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain import hub\n",
"from langchain.chains import create_retrieval_chain\n",
"from langchain.chains.combine_documents import create_stuff_documents_chain\n",
"\n",
"# See full prompt at https://smith.langchain.com/hub/langchain-ai/retrieval-qa-chat\n",
"retrieval_qa_chat_prompt = hub.pull(\"langchain-ai/retrieval-qa-chat\")\n",
"\n",
"combine_docs_chain = create_stuff_documents_chain(llm, retrieval_qa_chat_prompt)\n",
"rag_chain = create_retrieval_chain(vectorstore.as_retriever(), combine_docs_chain)\n",
"\n",
"rag_chain.invoke({\"input\": \"What are autonomous agents?\"})"
]
},
{
"cell_type": "markdown",
"id": "2772f4e9",
"metadata": {},
"source": [
"## `ConversationalRetrievalChain`\n",
"<span data-heading-keywords=\"conversationalretrievalchain\"></span>\n",
"\n",
"The [`ConversationalRetrievalChain`](https://api.python.langchain.com/en/latest/chains/langchain.chains.conversational_retrieval.base.ConversationalRetrievalChain.html) was an all-in one way that combined retrieval-augmented generation with chat history, allowing you to \"chat with\" your documents.\n",
"\n",
"Advantages of switching to the LCEL implementation are similar to the `RetrievalQA` section above:\n",
"\n",
"- Clearer internals. The `ConversationalRetrievalChain` chain hides an entire question rephrasing step which dereferences the initial query against the chat history.\n",
" - This means the class contains two sets of configurable prompts, LLMs, etc.\n",
"- More easily return source documents.\n",
"- Support for runnable methods like streaming and async operations.\n",
"\n",
"Here are side-by-side implementations with custom prompts. We'll reuse the loaded documents and vector store from the previous section:"
]
},
{
"cell_type": "markdown",
"id": "8bc06416",
"metadata": {},
"source": [
"<ColumnContainer>\n",
"\n",
"<Column>\n",
"\n",
"#### Legacy"
]
},
{
"cell_type": "code",
"execution_count": 31,
"id": "54eb9576",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'question': 'What are autonomous agents?',\n",
" 'chat_history': '',\n",
" 'answer': 'Autonomous agents are powered by Large Language Models (LLMs) to handle tasks like scientific discovery and complex experiments autonomously. These agents can browse the internet, read documentation, execute code, and leverage other LLMs to perform tasks. They can reason and plan ahead to decompose complicated tasks into manageable steps.'}"
]
},
"execution_count": 31,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain.chains import ConversationalRetrievalChain\n",
"\n",
"condense_question_template = \"\"\"\n",
"Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n",
"\n",
"Chat History:\n",
"{chat_history}\n",
"Follow Up Input: {question}\n",
"Standalone question:\"\"\"\n",
"\n",
"condense_question_prompt = ChatPromptTemplate.from_template(condense_question_template)\n",
"\n",
"qa_template = \"\"\"\n",
"You are an assistant for question-answering tasks.\n",
"Use the following pieces of retrieved context to answer\n",
"the question. If you don't know the answer, say that you\n",
"don't know. Use three sentences maximum and keep the\n",
"answer concise.\n",
"\n",
"Chat History:\n",
"{chat_history}\n",
"\n",
"Other context:\n",
"{context}\n",
"\n",
"Question: {question}\n",
"\"\"\"\n",
"\n",
"qa_prompt = ChatPromptTemplate.from_template(qa_template)\n",
"\n",
"convo_qa_chain = ConversationalRetrievalChain.from_llm(\n",
" llm,\n",
" vectorstore.as_retriever(),\n",
" condense_question_prompt=condense_question_prompt,\n",
" combine_docs_chain_kwargs={\n",
" \"prompt\": qa_prompt,\n",
" },\n",
")\n",
"\n",
"convo_qa_chain(\n",
" {\n",
" \"question\": \"What are autonomous agents?\",\n",
" \"chat_history\": \"\",\n",
" }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "43a8a23c",
"metadata": {},
"source": [
"</Column>\n",
"\n",
"<Column>\n",
"\n",
"#### LCEL\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 25,
"id": "c884b138",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'input': 'What are autonomous agents?',\n",
" 'chat_history': [],\n",
" 'context': [Document(page_content='Boiko et al. (2023) also looked into LLM-empowered agents for scientific discovery, to handle autonomous design, planning, and performance of complex scientific experiments. This agent can use tools to browse the Internet, read documentation, execute code, call robotics experimentation APIs and leverage other LLMs.\\nFor example, when requested to \"develop a novel anticancer drug\", the model came up with the following reasoning steps:', metadata={'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'description': 'Building agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview In a LLM-powered autonomous agent system, LLM functions as the agents brain, complemented by several key components:', 'language': 'en'}),\n",
" Document(page_content='Weng, Lilian. (Jun 2023). “LLM-powered Autonomous Agents”. LilLog. https://lilianweng.github.io/posts/2023-06-23-agent/.', metadata={'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'description': 'Building agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview In a LLM-powered autonomous agent system, LLM functions as the agents brain, complemented by several key components:', 'language': 'en'}),\n",
" Document(page_content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#', metadata={'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'description': 'Building agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview In a LLM-powered autonomous agent system, LLM functions as the agents brain, complemented by several key components:', 'language': 'en'}),\n",
" Document(page_content='Or\\n@article{weng2023agent,\\n title = \"LLM-powered Autonomous Agents\",\\n author = \"Weng, Lilian\",\\n journal = \"lilianweng.github.io\",\\n year = \"2023\",\\n month = \"Jun\",\\n url = \"https://lilianweng.github.io/posts/2023-06-23-agent/\"\\n}\\nReferences#\\n[1] Wei et al. “Chain of thought prompting elicits reasoning in large language models.” NeurIPS 2022\\n[2] Yao et al. “Tree of Thoughts: Dliberate Problem Solving with Large Language Models.” arXiv preprint arXiv:2305.10601 (2023).', metadata={'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'description': 'Building agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview In a LLM-powered autonomous agent system, LLM functions as the agents brain, complemented by several key components:', 'language': 'en'})],\n",
" 'answer': 'Autonomous agents are entities capable of acting independently, making decisions, and performing tasks without direct human intervention. These agents can interact with their environment, perceive information, and take actions based on their goals or objectives. They often use artificial intelligence techniques to navigate and accomplish tasks in complex or dynamic environments.'}"
]
},
"execution_count": 25,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain.chains import create_history_aware_retriever, create_retrieval_chain\n",
"\n",
"condense_question_system_template = (\n",
" \"Given a chat history and the latest user question \"\n",
" \"which might reference context in the chat history, \"\n",
" \"formulate a standalone question which can be understood \"\n",
" \"without the chat history. Do NOT answer the question, \"\n",
" \"just reformulate it if needed and otherwise return it as is.\"\n",
")\n",
"\n",
"condense_question_prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\"system\", condense_question_system_template),\n",
" (\"placeholder\", \"{chat_history}\"),\n",
" (\"human\", \"{input}\"),\n",
" ]\n",
")\n",
"history_aware_retriever = create_history_aware_retriever(\n",
" llm, vectorstore.as_retriever(), condense_question_prompt\n",
")\n",
"\n",
"system_prompt = (\n",
" \"You are an assistant for question-answering tasks. \"\n",
" \"Use the following pieces of retrieved context to answer \"\n",
" \"the question. If you don't know the answer, say that you \"\n",
" \"don't know. Use three sentences maximum and keep the \"\n",
" \"answer concise.\"\n",
" \"\\n\\n\"\n",
" \"{context}\"\n",
")\n",
"\n",
"qa_prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\"system\", system_prompt),\n",
" (\"placeholder\", \"{chat_history}\"),\n",
" (\"human\", \"{input}\"),\n",
" ]\n",
")\n",
"qa_chain = create_stuff_documents_chain(llm, qa_prompt)\n",
"\n",
"convo_qa_chain = create_retrieval_chain(history_aware_retriever, qa_chain)\n",
"\n",
"convo_qa_chain.invoke(\n",
" {\n",
" \"input\": \"What are autonomous agents?\",\n",
" \"chat_history\": [],\n",
" }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "b2717810",
"metadata": {},
"source": [
"</Column>\n",
"\n",
"</ColumnContainer>\n",
"\n",
"## Next steps\n",
"\n",
"You've now seen how to migrate existing usage of some legacy chains to LCEL.\n",
"\n",
"Next, check out the [LCEL conceptual docs](/docs/concepts/#langchain-expression-language-lcel) for more background information."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -58,7 +58,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"id": "6d55008f",
"metadata": {},
"outputs": [],
@@ -81,17 +81,17 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 38,
"id": "070bf702",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Joke(setup='Why was the cat sitting on the computer?', punchline='Because it wanted to keep an eye on the mouse!', rating=8)"
"Joke(setup='Why was the cat sitting on the computer?', punchline='To keep an eye on the mouse!', rating=None)"
]
},
"execution_count": 3,
"execution_count": 38,
"metadata": {},
"output_type": "execute_result"
}
@@ -514,49 +514,12 @@
")"
]
},
{
"cell_type": "markdown",
"id": "91e95aa2",
"metadata": {},
"source": [
"### (Advanced) Raw outputs\n",
"\n",
"LLMs aren't perfect at generating structured output, especially as schemas become complex. You can avoid raising exceptions and handle the raw output yourself by passing `include_raw=True`. This changes the output format to contain the raw message output, the `parsed` value (if successful), and any resulting errors:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "10ed2842",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_ASK4EmZeZ69Fi3p554Mb4rWy', 'function': {'arguments': '{\"setup\":\"Why was the cat sitting on the computer?\",\"punchline\":\"Because it wanted to keep an eye on the mouse!\"}', 'name': 'Joke'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 36, 'prompt_tokens': 107, 'total_tokens': 143}, 'model_name': 'gpt-4-0125-preview', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-6491d35b-9164-4656-b75c-d7882cfb76cb-0', tool_calls=[{'name': 'Joke', 'args': {'setup': 'Why was the cat sitting on the computer?', 'punchline': 'Because it wanted to keep an eye on the mouse!'}, 'id': 'call_ASK4EmZeZ69Fi3p554Mb4rWy'}], usage_metadata={'input_tokens': 107, 'output_tokens': 36, 'total_tokens': 143}),\n",
" 'parsed': Joke(setup='Why was the cat sitting on the computer?', punchline='Because it wanted to keep an eye on the mouse!', rating=None),\n",
" 'parsing_error': None}"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"structured_llm = llm.with_structured_output(Joke, include_raw=True)\n",
"\n",
"structured_llm.invoke(\n",
" \"Tell me a joke about cats, respond in JSON with `setup` and `punchline` keys\"\n",
")"
]
},
{
"cell_type": "markdown",
"id": "5e92a98a",
"metadata": {},
"source": [
"## Prompting and parsing model outputs directly\n",
"## Prompting and parsing model directly\n",
"\n",
"Not all models support `.with_structured_output()`, since not all models have tool calling or JSON mode support. For such models you'll need to directly prompt the model to use a specific format, and use an output parser to extract the structured response from the raw model output.\n",
"\n",
@@ -824,9 +787,9 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"display_name": "poetry-venv-2",
"language": "python",
"name": "python3"
"name": "poetry-venv-2"
},
"language_info": {
"codemirror_mode": {
@@ -838,7 +801,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.9.1"
}
},
"nbformat": 4,

View File

@@ -1,18 +1,5 @@
{
"cells": [
{
"cell_type": "raw",
"metadata": {
"vscode": {
"languageId": "raw"
}
},
"source": [
"---\n",
"keywords: [tool calling, tool call]\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {},
@@ -24,7 +11,6 @@
"This guide assumes familiarity with the following concepts:\n",
"- [Chat models](/docs/concepts/#chat-models)\n",
"- [LangChain Tools](/docs/concepts/#tools)\n",
"- [Output parsers](/docs/concepts/#output-parsers)\n",
"\n",
":::\n",
"\n",
@@ -52,12 +38,6 @@
"parameters matching the desired schema, then treat the generated output as your final \n",
"result.\n",
"\n",
":::note\n",
"\n",
"If you only need formatted values, try the [.with_structured_output()](/docs/how_to/structured_output/#the-with_structured_output-method) chat model method as a simpler entrypoint.\n",
"\n",
":::\n",
"\n",
"However, tool calling goes beyond [structured output](/docs/how_to/structured_output/)\n",
"since you can pass responses from called tools back to the model to create longer interactions.\n",
"For instance, given a search engine tool, an LLM might handle a \n",
@@ -92,7 +72,7 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
@@ -123,7 +103,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -171,14 +151,22 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 9,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"%pip install -qU langchain_openai\n",
"%pip install -qU langchain langchain_openai\n",
"\n",
"import os\n",
"from getpass import getpass\n",
@@ -192,33 +180,18 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 10,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_g4RuAijtDcSeM96jXyCuiLSN', 'function': {'arguments': '{\"a\":3,\"b\":12}', 'name': 'Multiply'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 95, 'total_tokens': 113}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-5157d15a-7e0e-4ab1-af48-3d98010cd152-0', tool_calls=[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_g4RuAijtDcSeM96jXyCuiLSN'}], usage_metadata={'input_tokens': 95, 'output_tokens': 18, 'total_tokens': 113})"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"outputs": [],
"source": [
"llm_with_tools = llm.bind_tools(tools)\n",
"\n",
"query = \"What is 3 * 12?\"\n",
"\n",
"llm_with_tools.invoke(query)"
"llm_with_tools = llm.bind_tools(tools)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"As we can see, even though the prompt didn't really suggest a tool call, our LLM made one since it was forced to do so. You can look at the docs for [bind_tools()](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.BaseChatOpenAI.html#langchain_openai.chat_models.base.BaseChatOpenAI.bind_tools) to learn about all the ways to customize how your LLM selects tools."
"As we can see, even though the prompt didn't really suggest a tool call, our LLM made one since it was forced to do so. You can look at the docs for [`bind_tool`](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.BaseChatOpenAI.html#langchain_openai.chat_models.base.BaseChatOpenAI.bind_tools) to learn about all the ways to customize how your LLM selects tools."
]
},
{
@@ -242,7 +215,7 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": null,
"metadata": {},
"outputs": [
{
@@ -250,15 +223,14 @@
"text/plain": [
"[{'name': 'Multiply',\n",
" 'args': {'a': 3, 'b': 12},\n",
" 'id': 'call_TnadLbWJu9HwDULRb51RNSMw'},\n",
" 'id': 'call_KquHA7mSbgtAkpkmRPaFnJKa'},\n",
" {'name': 'Add',\n",
" 'args': {'a': 11, 'b': 49},\n",
" 'id': 'call_Q9vt1up05sOQScXvUYWzSpCg'}]"
" 'id': 'call_Fl0hQi4IBTzlpaJYlM5kPQhE'}]"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
"output_type": "display_data"
}
],
"source": [
@@ -279,13 +251,12 @@
"a name, string arguments, identifier, and error message.\n",
"\n",
"If desired, [output parsers](/docs/how_to#output-parsers) can further \n",
"process the output. For example, we can convert existing values populated on the `.tool_calls` attribute back to the original Pydantic class using the\n",
"[PydanticToolsParser](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.openai_tools.PydanticToolsParser.html):"
"process the output. For example, we can convert back to the original Pydantic class:"
]
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": null,
"metadata": {},
"outputs": [
{
@@ -294,13 +265,12 @@
"[Multiply(a=3, b=12), Add(a=11, b=49)]"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
"output_type": "display_data"
}
],
"source": [
"from langchain_core.output_parsers import PydanticToolsParser\n",
"from langchain_core.output_parsers.openai_tools import PydanticToolsParser\n",
"\n",
"chain = llm_with_tools | PydanticToolsParser(tools=[Multiply, Add])\n",
"chain.invoke(query)"
@@ -328,24 +298,10 @@
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 4
"nbformat_minor": 2
}

View File

@@ -6,14 +6,6 @@
"source": [
"# How to force tool calling behavior\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"- [Chat models](/docs/concepts/#chat-models)\n",
"- [LangChain Tools](/docs/concepts/#tools)\n",
"- [How to use a model to call tools](/docs/how_to/tool_calling)\n",
":::\n",
"\n",
"In order to force our LLM to spelect a specific tool, we can use the `tool_choice` parameter to ensure certain behavior. First, let's define our model and tools:"
]
},

View File

@@ -12,7 +12,7 @@
"- [Chat models](/docs/concepts/#chat-models)\n",
"- [LangChain Tools](/docs/concepts/#tools)\n",
"- [How to create tools](/docs/how_to/custom_tools)\n",
"- [How to use a model to call tools](/docs/how_to/tool_calling)\n",
"- [How to use a model to call tools](https://python.langchain.com/v0.2/docs/how_to/tool_calling)\n",
":::\n",
"\n",
":::{.callout-info} Supported models\n",

View File

@@ -1,286 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to stream events from within a tool\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"- [LangChain Tools](/docs/concepts/#tools)\n",
"- [Using stream events](/docs/how_to/streaming/#using-stream-events)\n",
"\n",
":::\n",
"\n",
"If you have tools that call LLMs, retrievers, or other runnables, you may want to access internal events from those runnables. This guide shows you a few ways you can do this using the `astream_events()` method.\n",
"\n",
":::caution\n",
"LangChain cannot automatically propagate callbacks to child runnables if you are running async code in python<=3.10.\n",
" \n",
"This is a common reason why you may fail to see events being emitted from custom runnables or tools.\n",
":::\n",
"\n",
"We'll define a custom tool below that calls a chain that summarizes its input in a special way by prompting an LLM to return only 10 words, then reversing the output:\n",
"\n",
"```{=mdx}\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
"<ChatModelTabs customVarName=\"model\" />\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"%pip install -qU langchain langchain_anthropic\n",
"\n",
"import os\n",
"from getpass import getpass\n",
"\n",
"from langchain_anthropic import ChatAnthropic\n",
"\n",
"if \"ANTHROPIC_API_KEY\" not in os.environ:\n",
" os.environ[\"ANTHROPIC_API_KEY\"] = getpass()\n",
"\n",
"model = ChatAnthropic(model=\"claude-3-5-sonnet-20240620\", temperature=0)"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.tools import tool\n",
"\n",
"\n",
"@tool\n",
"def special_summarization_tool(long_text: str) -> str:\n",
" \"\"\"A tool that summarizes input text using advanced techniques.\"\"\"\n",
" prompt = ChatPromptTemplate.from_template(\n",
" \"You are an expert writer. Summarize the following text in 10 words or less:\\n\\n{long_text}\"\n",
" )\n",
"\n",
" def reverse(x: str):\n",
" return x[::-1]\n",
"\n",
" chain = prompt | model | StrOutputParser() | reverse\n",
" summary = chain.invoke({\"long_text\": long_text})\n",
" return summary"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"If you just invoke the tool directly, you can see that you only get the final response:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'.yad noitaudarg rof tiftuo sesoohc yrraB ;scisyhp seifed eeB'"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"LONG_TEXT = \"\"\"\n",
"NARRATOR:\n",
"(Black screen with text; The sound of buzzing bees can be heard)\n",
"According to all known laws of aviation, there is no way a bee should be able to fly. Its wings are too small to get its fat little body off the ground. The bee, of course, flies anyway because bees don't care what humans think is impossible.\n",
"BARRY BENSON:\n",
"(Barry is picking out a shirt)\n",
"Yellow, black. Yellow, black. Yellow, black. Yellow, black. Ooh, black and yellow! Let's shake it up a little.\n",
"JANET BENSON:\n",
"Barry! Breakfast is ready!\n",
"BARRY:\n",
"Coming! Hang on a second.\n",
"\"\"\"\n",
"\n",
"special_summarization_tool.invoke({\"long_text\": LONG_TEXT})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"If you wanted to access the raw output from the chat model, you could use the [`astream_events()`](/docs/how_to/streaming/#using-stream-events) method and look for `on_chat_model_end` events:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'event': 'on_chat_model_end', 'data': {'output': AIMessage(content='Bee defies physics; Barry chooses outfit for graduation day.', response_metadata={'stop_reason': 'end_turn', 'stop_sequence': None}, id='run-195c0986-2ffa-43a3-9366-f2f96c42fe57', usage_metadata={'input_tokens': 182, 'output_tokens': 16, 'total_tokens': 198}), 'input': {'messages': [[HumanMessage(content=\"You are an expert writer. Summarize the following text in 10 words or less:\\n\\n\\nNARRATOR:\\n(Black screen with text; The sound of buzzing bees can be heard)\\nAccording to all known laws of aviation, there is no way a bee should be able to fly. Its wings are too small to get its fat little body off the ground. The bee, of course, flies anyway because bees don't care what humans think is impossible.\\nBARRY BENSON:\\n(Barry is picking out a shirt)\\nYellow, black. Yellow, black. Yellow, black. Yellow, black. Ooh, black and yellow! Let's shake it up a little.\\nJANET BENSON:\\nBarry! Breakfast is ready!\\nBARRY:\\nComing! Hang on a second.\\n\")]]}}, 'run_id': '195c0986-2ffa-43a3-9366-f2f96c42fe57', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['370919df-1bc3-43ae-aab2-8e112a4ddf47', 'de535624-278b-4927-9393-6d0cac3248df']}\n"
]
}
],
"source": [
"stream = special_summarization_tool.astream_events(\n",
" {\"long_text\": LONG_TEXT}, version=\"v2\"\n",
")\n",
"\n",
"async for event in stream:\n",
" if event[\"event\"] == \"on_chat_model_end\":\n",
" print(event)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"And you can see that you get the raw response from the chat model.\n",
"\n",
"`astream_events()` will automatically call internal runnables in a chain with streaming enabled if possible, so if you wanted to a stream of tokens as they are generated from the chat model, you could simply filter our calls to look for `on_chat_model_stream` events with no other changes:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', usage_metadata={'input_tokens': 182, 'output_tokens': 0, 'total_tokens': 182})}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='Bee', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' def', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='ies physics', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=';', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' Barry', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' cho', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='oses outfit', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' for', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' graduation', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' day', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='.', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='', response_metadata={'stop_reason': 'end_turn', 'stop_sequence': None}, id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', usage_metadata={'input_tokens': 0, 'output_tokens': 16, 'total_tokens': 16})}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n"
]
}
],
"source": [
"stream = special_summarization_tool.astream_events(\n",
" {\"long_text\": LONG_TEXT}, version=\"v2\"\n",
")\n",
"\n",
"async for event in stream:\n",
" if event[\"event\"] == \"on_chat_model_stream\":\n",
" print(event)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Note that you still have access to the final tool response as well. You can access it by looking for an `on_tool_end` event.\n",
"\n",
"To make events your tool emits easier to identify, you can also add identifiers to runnables using the `with_config()` method. `run_name` will apply to only to the runnable you attach it to, while `tags` will be inherited by runnables called within your initial runnable.\n",
"\n",
"Let's redeclare the tool with a tag, then run it with `astream_events()` with some filters. You should only see streamed events from the chat model and the final tool output:"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630', usage_metadata={'input_tokens': 182, 'output_tokens': 0, 'total_tokens': 182})}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='Bee', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' def', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='ies physics', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=';', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' Barry', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' cho', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='oses outfit', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' for', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' graduation', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' day', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='.', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='', response_metadata={'stop_reason': 'end_turn', 'stop_sequence': None}, id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630', usage_metadata={'input_tokens': 0, 'output_tokens': 16, 'total_tokens': 16})}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
"{'event': 'on_tool_end', 'data': {'output': '.yad noitaudarg rof tiftuo sesoohc yrraB ;scisyhp seifed eeB'}, 'run_id': '49d9d7d3-2b02-4964-a6c5-12f57a063146', 'name': 'special_summarization_tool', 'tags': ['bee_movie'], 'metadata': {}, 'parent_ids': []}\n"
]
}
],
"source": [
"tagged_tool = special_summarization_tool.with_config({\"tags\": [\"bee_movie\"]})\n",
"\n",
"stream = tagged_tool.astream_events(\n",
" {\"long_text\": LONG_TEXT}, version=\"v2\", include_tags=[\"bee_movie\"]\n",
")\n",
"\n",
"async for event in stream:\n",
" event_type = event[\"event\"]\n",
" if event_type == \"on_chat_model_stream\" or event_type == \"on_tool_end\":\n",
" print(event)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Next steps\n",
"\n",
"Now you've learned how to stream events from within a tool. Next, you can learn more about how to use tools:\n",
"\n",
"- Bind [model-specific tools](/docs/how_to/tools_model_specific/)\n",
"- Pass [runtime values to tools](/docs/how_to/tool_runtime)\n",
"- Pass [tool results back to a model](/docs/how_to/tool_results_pass_to_model)\n",
"\n",
"You can also check out some more specific uses of tool calling:\n",
"\n",
"- Building [tool-using chains and agents](/docs/how_to#tools)\n",
"- Getting [structured outputs](/docs/how_to/structured_output/) from models"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -7,18 +7,9 @@
"source": [
"# How to handle tool errors\n",
"\n",
":::info Prerequisites\n",
"Using a model to invoke a tool has some obvious potential failure modes. Firstly, the model needs to return a output that can be parsed at all. Secondly, the model needs to return tool arguments that are valid.\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"- [Chat models](/docs/concepts/#chat-models)\n",
"- [LangChain Tools](/docs/concepts/#tools)\n",
"- [How to use a model to call tools](/docs/how_to/tool_calling)\n",
"\n",
":::\n",
"\n",
"Calling tools with an LLM is generally more reliable than pure prompting, but it isn't perfect. The model may try to call a tool that doesn't exist or fail to return arguments that match the requested schema. Strategies like keeping schemas simple, reducing the number of tools you pass at once, and having good names and descriptions can help mitigate this risk, but aren't foolproof.\n",
"\n",
"This guide covers some ways to build error handling into your chains to mitigate these failure modes."
"We can build error handling into our chains to mitigate these failure modes."
]
},
{
@@ -51,7 +42,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": null,
"id": "08785b6d-722d-4620-b6ec-36deb3842c69",
"metadata": {},
"outputs": [],
@@ -81,7 +72,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 1,
"id": "86258950-5e61-4340-81b9-84a5d26e8773",
"metadata": {},
"outputs": [],
@@ -91,14 +82,12 @@
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 2,
"id": "1d20604e-c4d1-4d21-841b-23e4f61aec36",
"metadata": {},
"outputs": [],
@@ -110,13 +99,28 @@
"@tool\n",
"def complex_tool(int_arg: int, float_arg: float, dict_arg: dict) -> int:\n",
" \"\"\"Do something complex with a complex tool.\"\"\"\n",
" return int_arg * float_arg\n",
"\n",
"\n",
" return int_arg * float_arg"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "553c2c13-28c8-4451-8a3a-6c31d52dc31d",
"metadata": {},
"outputs": [],
"source": [
"llm_with_tools = llm.bind_tools(\n",
" [complex_tool],\n",
")\n",
"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "802b2eca-9f79-4d6c-8257-85139ca5c752",
"metadata": {},
"outputs": [],
"source": [
"# Define chain\n",
"chain = llm_with_tools | (lambda msg: msg.tool_calls[0][\"args\"]) | complex_tool"
]
@@ -131,7 +135,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 12,
"id": "d354664c-ac44-4967-a35f-8912b3ad9477",
"metadata": {},
"outputs": [
@@ -142,14 +146,14 @@
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mValidationError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[6], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mchain\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43muse complex tool. the args are 5, 2.1, empty dictionary. don\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mt forget dict_arg\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\n\u001b[1;32m 3\u001b[0m \u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/langchain_core/runnables/base.py:2572\u001b[0m, in \u001b[0;36mRunnableSequence.invoke\u001b[0;34m(self, input, config, **kwargs)\u001b[0m\n\u001b[1;32m 2570\u001b[0m \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m step\u001b[38;5;241m.\u001b[39minvoke(\u001b[38;5;28minput\u001b[39m, config, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[1;32m 2571\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 2572\u001b[0m \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[43mstep\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2573\u001b[0m \u001b[38;5;66;03m# finish the root run\u001b[39;00m\n\u001b[1;32m 2574\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n",
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/langchain_core/tools.py:380\u001b[0m, in \u001b[0;36mBaseTool.invoke\u001b[0;34m(self, input, config, **kwargs)\u001b[0m\n\u001b[1;32m 373\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21minvoke\u001b[39m(\n\u001b[1;32m 374\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 375\u001b[0m \u001b[38;5;28minput\u001b[39m: Union[\u001b[38;5;28mstr\u001b[39m, Dict],\n\u001b[1;32m 376\u001b[0m config: Optional[RunnableConfig] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m 377\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any,\n\u001b[1;32m 378\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Any:\n\u001b[1;32m 379\u001b[0m config \u001b[38;5;241m=\u001b[39m ensure_config(config)\n\u001b[0;32m--> 380\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 381\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 382\u001b[0m \u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcallbacks\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 383\u001b[0m \u001b[43m \u001b[49m\u001b[43mtags\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtags\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 384\u001b[0m \u001b[43m \u001b[49m\u001b[43mmetadata\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mmetadata\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 385\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mrun_name\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 386\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_id\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpop\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mrun_id\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 387\u001b[0m \u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 388\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 389\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/langchain_core/tools.py:537\u001b[0m, in \u001b[0;36mBaseTool.run\u001b[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, config, **kwargs)\u001b[0m\n\u001b[1;32m 535\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m ValidationError \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 536\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle_validation_error:\n\u001b[0;32m--> 537\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[1;32m 538\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle_validation_error, \u001b[38;5;28mbool\u001b[39m):\n\u001b[1;32m 539\u001b[0m observation \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mTool input validation error\u001b[39m\u001b[38;5;124m\"\u001b[39m\n",
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/langchain_core/tools.py:526\u001b[0m, in \u001b[0;36mBaseTool.run\u001b[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, config, **kwargs)\u001b[0m\n\u001b[1;32m 524\u001b[0m context \u001b[38;5;241m=\u001b[39m copy_context()\n\u001b[1;32m 525\u001b[0m context\u001b[38;5;241m.\u001b[39mrun(_set_config_context, child_config)\n\u001b[0;32m--> 526\u001b[0m parsed_input \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_parse_input\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 527\u001b[0m tool_args, tool_kwargs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_to_args_and_kwargs(parsed_input)\n\u001b[1;32m 528\u001b[0m observation \u001b[38;5;241m=\u001b[39m (\n\u001b[1;32m 529\u001b[0m context\u001b[38;5;241m.\u001b[39mrun(\n\u001b[1;32m 530\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_run, \u001b[38;5;241m*\u001b[39mtool_args, run_manager\u001b[38;5;241m=\u001b[39mrun_manager, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mtool_kwargs\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 533\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m context\u001b[38;5;241m.\u001b[39mrun(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_run, \u001b[38;5;241m*\u001b[39mtool_args, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mtool_kwargs)\n\u001b[1;32m 534\u001b[0m )\n",
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/langchain_core/tools.py:424\u001b[0m, in \u001b[0;36mBaseTool._parse_input\u001b[0;34m(self, tool_input)\u001b[0m\n\u001b[1;32m 422\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 423\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m input_args \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m--> 424\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[43minput_args\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mparse_obj\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 425\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m {\n\u001b[1;32m 426\u001b[0m k: \u001b[38;5;28mgetattr\u001b[39m(result, k)\n\u001b[1;32m 427\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m k, v \u001b[38;5;129;01min\u001b[39;00m result\u001b[38;5;241m.\u001b[39mdict()\u001b[38;5;241m.\u001b[39mitems()\n\u001b[1;32m 428\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m k \u001b[38;5;129;01min\u001b[39;00m tool_input\n\u001b[1;32m 429\u001b[0m }\n\u001b[1;32m 430\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m tool_input\n",
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/pydantic/main.py:526\u001b[0m, in \u001b[0;36mpydantic.main.BaseModel.parse_obj\u001b[0;34m()\u001b[0m\n",
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/pydantic/main.py:341\u001b[0m, in \u001b[0;36mpydantic.main.BaseModel.__init__\u001b[0;34m()\u001b[0m\n",
"Cell \u001b[0;32mIn[12], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mchain\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43muse complex tool. the args are 5, 2.1, empty dictionary. don\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mt forget dict_arg\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\n\u001b[1;32m 3\u001b[0m \u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/base.py:2499\u001b[0m, in \u001b[0;36mRunnableSequence.invoke\u001b[0;34m(self, input, config)\u001b[0m\n\u001b[1;32m 2497\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 2498\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m i, step \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28menumerate\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msteps):\n\u001b[0;32m-> 2499\u001b[0m \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[43mstep\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2500\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2501\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;66;43;03m# mark each step as a child run\u001b[39;49;00m\n\u001b[1;32m 2502\u001b[0m \u001b[43m \u001b[49m\u001b[43mpatch_config\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2503\u001b[0m \u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget_child\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43mf\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mseq:step:\u001b[39;49m\u001b[38;5;132;43;01m{\u001b[39;49;00m\u001b[43mi\u001b[49m\u001b[38;5;241;43m+\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[38;5;132;43;01m}\u001b[39;49;00m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2504\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2505\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2506\u001b[0m \u001b[38;5;66;03m# finish the root run\u001b[39;00m\n\u001b[1;32m 2507\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n",
"File \u001b[0;32m~/langchain/libs/core/langchain_core/tools.py:241\u001b[0m, in \u001b[0;36mBaseTool.invoke\u001b[0;34m(self, input, config, **kwargs)\u001b[0m\n\u001b[1;32m 234\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21minvoke\u001b[39m(\n\u001b[1;32m 235\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 236\u001b[0m \u001b[38;5;28minput\u001b[39m: Union[\u001b[38;5;28mstr\u001b[39m, Dict],\n\u001b[1;32m 237\u001b[0m config: Optional[RunnableConfig] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m 238\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any,\n\u001b[1;32m 239\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Any:\n\u001b[1;32m 240\u001b[0m config \u001b[38;5;241m=\u001b[39m ensure_config(config)\n\u001b[0;32m--> 241\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 242\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 243\u001b[0m \u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcallbacks\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 244\u001b[0m \u001b[43m \u001b[49m\u001b[43mtags\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtags\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 245\u001b[0m \u001b[43m \u001b[49m\u001b[43mmetadata\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mmetadata\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 246\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mrun_name\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 247\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_id\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpop\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mrun_id\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 248\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 249\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/langchain/libs/core/langchain_core/tools.py:387\u001b[0m, in \u001b[0;36mBaseTool.run\u001b[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, **kwargs)\u001b[0m\n\u001b[1;32m 385\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m ValidationError \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 386\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle_validation_error:\n\u001b[0;32m--> 387\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[1;32m 388\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle_validation_error, \u001b[38;5;28mbool\u001b[39m):\n\u001b[1;32m 389\u001b[0m observation \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mTool input validation error\u001b[39m\u001b[38;5;124m\"\u001b[39m\n",
"File \u001b[0;32m~/langchain/libs/core/langchain_core/tools.py:378\u001b[0m, in \u001b[0;36mBaseTool.run\u001b[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, **kwargs)\u001b[0m\n\u001b[1;32m 364\u001b[0m run_manager \u001b[38;5;241m=\u001b[39m callback_manager\u001b[38;5;241m.\u001b[39mon_tool_start(\n\u001b[1;32m 365\u001b[0m {\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mname\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mname, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdescription\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdescription},\n\u001b[1;32m 366\u001b[0m tool_input \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(tool_input, \u001b[38;5;28mstr\u001b[39m) \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mstr\u001b[39m(tool_input),\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 375\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs,\n\u001b[1;32m 376\u001b[0m )\n\u001b[1;32m 377\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 378\u001b[0m parsed_input \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_parse_input\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 379\u001b[0m tool_args, tool_kwargs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_to_args_and_kwargs(parsed_input)\n\u001b[1;32m 380\u001b[0m observation \u001b[38;5;241m=\u001b[39m (\n\u001b[1;32m 381\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_run(\u001b[38;5;241m*\u001b[39mtool_args, run_manager\u001b[38;5;241m=\u001b[39mrun_manager, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mtool_kwargs)\n\u001b[1;32m 382\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m new_arg_supported\n\u001b[1;32m 383\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_run(\u001b[38;5;241m*\u001b[39mtool_args, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mtool_kwargs)\n\u001b[1;32m 384\u001b[0m )\n",
"File \u001b[0;32m~/langchain/libs/core/langchain_core/tools.py:283\u001b[0m, in \u001b[0;36mBaseTool._parse_input\u001b[0;34m(self, tool_input)\u001b[0m\n\u001b[1;32m 281\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 282\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m input_args \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m--> 283\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[43minput_args\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mparse_obj\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 284\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m {\n\u001b[1;32m 285\u001b[0m k: \u001b[38;5;28mgetattr\u001b[39m(result, k)\n\u001b[1;32m 286\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m k, v \u001b[38;5;129;01min\u001b[39;00m result\u001b[38;5;241m.\u001b[39mdict()\u001b[38;5;241m.\u001b[39mitems()\n\u001b[1;32m 287\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m k \u001b[38;5;129;01min\u001b[39;00m tool_input\n\u001b[1;32m 288\u001b[0m }\n\u001b[1;32m 289\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m tool_input\n",
"File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/pydantic/v1/main.py:526\u001b[0m, in \u001b[0;36mBaseModel.parse_obj\u001b[0;34m(cls, obj)\u001b[0m\n\u001b[1;32m 524\u001b[0m exc \u001b[38;5;241m=\u001b[39m \u001b[38;5;167;01mTypeError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m expected dict not \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mobj\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__class__\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m)\n\u001b[1;32m 525\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m ValidationError([ErrorWrapper(exc, loc\u001b[38;5;241m=\u001b[39mROOT_KEY)], \u001b[38;5;28mcls\u001b[39m) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01me\u001b[39;00m\n\u001b[0;32m--> 526\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mcls\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mobj\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/pydantic/v1/main.py:341\u001b[0m, in \u001b[0;36mBaseModel.__init__\u001b[0;34m(__pydantic_self__, **data)\u001b[0m\n\u001b[1;32m 339\u001b[0m values, fields_set, validation_error \u001b[38;5;241m=\u001b[39m validate_model(__pydantic_self__\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__class__\u001b[39m, data)\n\u001b[1;32m 340\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m validation_error:\n\u001b[0;32m--> 341\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m validation_error\n\u001b[1;32m 342\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 343\u001b[0m object_setattr(__pydantic_self__, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124m__dict__\u001b[39m\u001b[38;5;124m'\u001b[39m, values)\n",
"\u001b[0;31mValidationError\u001b[0m: 1 validation error for complex_toolSchema\ndict_arg\n field required (type=value_error.missing)"
]
}
@@ -172,26 +176,10 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 6,
"id": "8fedb550-683d-45ae-8876-ae7acb332019",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Calling tool with arguments:\n",
"\n",
"{'int_arg': 5, 'float_arg': 2.1}\n",
"\n",
"raised the following error:\n",
"\n",
"<class 'pydantic.error_wrappers.ValidationError'>: 1 validation error for complex_toolSchema\n",
"dict_arg\n",
" field required (type=value_error.missing)\n"
]
}
],
"outputs": [],
"source": [
"from typing import Any\n",
"\n",
@@ -205,8 +193,32 @@
" return f\"Calling tool with arguments:\\n\\n{tool_args}\\n\\nraised the following error:\\n\\n{type(e)}: {e}\"\n",
"\n",
"\n",
"chain = llm_with_tools | (lambda msg: msg.tool_calls[0][\"args\"]) | try_except_tool\n",
"\n",
"chain = llm_with_tools | (lambda msg: msg.tool_calls[0][\"args\"]) | try_except_tool"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "71a2c98d-c0be-4c0a-bb3d-41ad4596526c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Calling tool with arguments:\n",
"\n",
"{'int_arg': 5, 'float_arg': 2.1}\n",
"\n",
"raised the following error:\n",
"\n",
"<class 'pydantic.v1.error_wrappers.ValidationError'>: 1 validation error for complex_toolSchema\n",
"dict_arg\n",
" field required (type=value_error.missing)\n"
]
}
],
"source": [
"print(\n",
" chain.invoke(\n",
" \"use complex tool. the args are 5, 2.1, empty dictionary. don't forget dict_arg\"\n",
@@ -226,7 +238,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 17,
"id": "02cc4223-35fa-4240-976a-012299ca703c",
"metadata": {},
"outputs": [
@@ -236,22 +248,19 @@
"10.5"
]
},
"execution_count": 10,
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain = llm_with_tools | (lambda msg: msg.tool_calls[0][\"args\"]) | complex_tool\n",
"\n",
"better_model = ChatOpenAI(model=\"gpt-4-1106-preview\", temperature=0).bind_tools(\n",
" [complex_tool], tool_choice=\"complex_tool\"\n",
")\n",
"\n",
"better_chain = better_model | (lambda msg: msg.tool_calls[0][\"args\"]) | complex_tool\n",
"\n",
"chain_with_fallback = chain.with_fallbacks([better_chain])\n",
"\n",
"chain_with_fallback.invoke(\n",
" \"use complex tool. the args are 5, 2.1, empty dictionary. don't forget dict_arg\"\n",
")"
@@ -262,7 +271,7 @@
"id": "412f8c4e-cc83-4d87-84a1-5ba2f8edb1e9",
"metadata": {},
"source": [
"Looking at the [LangSmith trace](https://smith.langchain.com/public/00e91fc2-e1a4-4b0f-a82e-e6b3119d196c/r) for this chain run, we can see that the first chain call fails as expected and it's the fallback that succeeds."
"Looking at the [Langsmith trace](https://smith.langchain.com/public/00e91fc2-e1a4-4b0f-a82e-e6b3119d196c/r) for this chain run, we can see that the first chain call fails as expected and it's the fallback that succeeds."
]
},
{
@@ -277,13 +286,17 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 13,
"id": "b5659956-9454-468a-9753-a3ff9052b8f5",
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"from typing import Any\n",
"\n",
"from langchain_core.messages import AIMessage, HumanMessage, ToolCall, ToolMessage\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"\n",
"class CustomToolException(Exception):\n",
@@ -323,7 +336,7 @@
"# affect the prompt at all, but gives us the option to insert an arbitrary list of Messages\n",
"# into the prompt if needed. We'll use this on retries to insert the error message.\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [(\"human\", \"{input}\"), (\"placeholder\", \"{last_output}\")]\n",
" [(\"human\", \"{input}\"), MessagesPlaceholder(\"last_output\", optional=True)]\n",
")\n",
"chain = prompt | llm_with_tools | tool_custom_exception\n",
"\n",
@@ -335,7 +348,7 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 14,
"id": "4c45f5bd-cbb4-47d5-b4b6-aec50673c750",
"metadata": {},
"outputs": [
@@ -345,7 +358,7 @@
"10.5"
]
},
"execution_count": 12,
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
@@ -365,24 +378,6 @@
"source": [
"And our chain succeeds! Looking at the [LangSmith trace](https://smith.langchain.com/public/c11e804c-e14f-4059-bd09-64766f999c14/r), we can see that indeed our initial chain still fails, and it's only on retrying that the chain succeeds."
]
},
{
"cell_type": "markdown",
"id": "6b97af9f",
"metadata": {},
"source": [
"## Next steps\n",
"\n",
"Now you've seen some strategies how to handle tool calling errors. Next, you can learn more about how to use tools:\n",
"\n",
"- Few shot prompting [with tools](/docs/how_to/tools_few_shot/)\n",
"- Stream [tool calls](/docs/how_to/tool_streaming/)\n",
"- Pass [runtime values to tools](/docs/how_to/tool_runtime)\n",
"\n",
"You can also check out some more specific uses of tool calling:\n",
"\n",
"- Getting [structured outputs](/docs/how_to/structured_output/) from models"
]
}
],
"metadata": {
@@ -401,7 +396,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.10.1"
}
},
"nbformat": 4,

View File

@@ -18,9 +18,7 @@
"# ChatAI21\n",
"\n",
"This notebook covers how to get started with AI21 chat models.\n",
"Note that different chat models support different parameters. See the ",
"[AI21 documentation](https://docs.ai21.com/reference) to learn more about the parameters in your chosen model.\n",
"[See all AI21's LangChain components.](https://pypi.org/project/langchain-ai21/) \n",
"\n",
"## Installation"
]
},
@@ -46,8 +44,7 @@
"source": [
"## Environment Setup\n",
"\n",
"We'll need to get an [AI21 API key](https://docs.ai21.com/) and set the ",
"`AI21_API_KEY` environment variable:\n"
"We'll need to get a [AI21 API key](https://docs.ai21.com/) and set the `AI21_API_KEY` environment variable:\n"
]
},
{

View File

@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "raw",
"id": "afaf8039",
"id": "641f8cb0",
"metadata": {},
"source": [
"---\n",
@@ -12,89 +12,20 @@
},
{
"cell_type": "markdown",
"id": "e49f1e0d",
"id": "38f26d7a",
"metadata": {},
"source": [
"# AzureChatOpenAI\n",
"\n",
"This guide will help you get started with AzureOpenAI [chat models](/docs/concepts/#chat-models). For detailed documentation of all AzureChatOpenAI features and configurations head to the [API reference](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.azure.AzureChatOpenAI.html).\n",
">[Azure OpenAI Service](https://learn.microsoft.com/en-us/azure/ai-services/openai/overview) provides REST API access to OpenAI's powerful language models including the GPT-4, GPT-3.5-Turbo, and Embeddings model series. These models can be easily adapted to your specific task including but not limited to content generation, summarization, semantic search, and natural language to code translation. Users can access the service through REST APIs, Python SDK, or a web-based interface in the Azure OpenAI Studio.\n",
"\n",
"Azure OpenAI has several chat models. You can find information about their latest models and their costs, context windows, and supported input types in the [Azure docs](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models).\n",
"\n",
":::info Azure OpenAI vs OpenAI\n",
"\n",
"Azure OpenAI refers to OpenAI models hosted on the [Microsoft Azure platform](https://azure.microsoft.com/en-us/products/ai-services/openai-service). OpenAI also provides its own model APIs. To access OpenAI services directly, use the [ChatOpenAI integration](/docs/integrations/chat/openai/).\n",
"\n",
":::\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/v0.2/docs/integrations/chat/azure) | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
"| [AzureChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.azure.AzureChatOpenAI.html) | [langchain-openai](https://api.python.langchain.com/en/latest/openai_api_reference.html) | ❌ | beta | ✅ | ![PyPI - Downloads](https://img.shields.io/pypi/dm/langchain-openai?style=flat-square&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/langchain-openai?style=flat-square&label=%20) |\n",
"\n",
"### Model features\n",
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ | \n",
"\n",
"## Setup\n",
"\n",
"To access AzureOpenAI models you'll need to create an Azure account, create a deployment of an Azure OpenAI model, get the name and endpoint for your deployment, get an Azure OpenAI API key, and install the `langchain-openai` integration package.\n",
"\n",
"### Credentials\n",
"\n",
"Head to the [Azure docs](https://learn.microsoft.com/en-us/azure/ai-services/openai/chatgpt-quickstart?tabs=command-line%2Cpython-new&pivots=programming-language-python) to create your deployment and generate an API key. Once you've done this set the AZURE_OPENAI_API_KEY and AZURE_OPENAI_ENDPOINT environment variables:"
"This notebook goes over how to connect to an Azure-hosted OpenAI endpoint. First, we need to install the `langchain-openai` package."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"AZURE_OPENAI_API_KEY\"] = getpass.getpass(\"Enter your AzureOpenAI API key: \")\n",
"os.environ[\"AZURE_OPENAI_ENDPOINT\"] = \"https://YOUR-ENDPOINT.openai.azure.com/\""
]
},
{
"cell_type": "markdown",
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
"metadata": {},
"source": [
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
]
},
{
"cell_type": "markdown",
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"The LangChain AzureOpenAI integration lives in the `langchain-openai` package:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
"id": "d83ba7de",
"metadata": {},
"outputs": [],
"source": [
@@ -103,56 +34,65 @@
},
{
"cell_type": "markdown",
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
"metadata": {},
"id": "e39133c8",
"metadata": {
"vscode": {
"languageId": "raw"
}
},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our model object and generate chat completions.\n",
"- Replace `azure_deployment` with the name of your deployment,\n",
"- You can find the latest supported `api_version` here: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference."
"Next, let's set some environment variables to help us connect to the Azure OpenAI service. You can find these values in the Azure portal."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
"execution_count": null,
"id": "1d8d73bd",
"metadata": {},
"outputs": [],
"source": [
"from langchain_openai import AzureChatOpenAI\n",
"import os\n",
"\n",
"llm = AzureChatOpenAI(\n",
" azure_deployment=\"YOUR-DEPLOYMENT\",\n",
" api_version=\"2024-05-01-preview\",\n",
" temperature=0,\n",
" max_tokens=None,\n",
" timeout=None,\n",
" max_retries=2,\n",
" # other params...\n",
")"
"os.environ[\"AZURE_OPENAI_API_KEY\"] = \"...\"\n",
"os.environ[\"AZURE_OPENAI_ENDPOINT\"] = \"https://<your-endpoint>.openai.azure.com/\"\n",
"os.environ[\"AZURE_OPENAI_API_VERSION\"] = \"2023-06-01-preview\"\n",
"os.environ[\"AZURE_OPENAI_CHAT_DEPLOYMENT_NAME\"] = \"chat\""
]
},
{
"cell_type": "markdown",
"id": "2b4f3e15",
"id": "e7b160f8",
"metadata": {},
"source": [
"## Invocation"
"Next, let's construct our model and chat with it:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "cbe4bb58-ba13-4355-8af9-cd990dc47a64",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.messages import HumanMessage\n",
"from langchain_openai import AzureChatOpenAI\n",
"\n",
"model = AzureChatOpenAI(\n",
" openai_api_version=os.environ[\"AZURE_OPENAI_API_VERSION\"],\n",
" azure_deployment=os.environ[\"AZURE_OPENAI_CHAT_DEPLOYMENT_NAME\"],\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "62e0dbc3",
"metadata": {
"tags": []
},
"id": "99509140",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"J'adore la programmation.\", response_metadata={'token_usage': {'completion_tokens': 8, 'prompt_tokens': 31, 'total_tokens': 39}, 'model_name': 'gpt-35-turbo', 'system_fingerprint': None, 'prompt_filter_results': [{'prompt_index': 0, 'content_filter_results': {'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}}}], 'finish_reason': 'stop', 'logprobs': None, 'content_filter_results': {'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}}}, id='run-a6a732c2-cb02-4e50-9a9c-ab30eab034fc-0', usage_metadata={'input_tokens': 31, 'output_tokens': 8, 'total_tokens': 39})"
"AIMessage(content=\"J'adore programmer.\", response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 19, 'total_tokens': 25}, 'model_name': 'gpt-35-turbo', 'system_fingerprint': None, 'prompt_filter_results': [{'prompt_index': 0, 'content_filter_results': {'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}}}], 'finish_reason': 'stop', 'logprobs': None, 'content_filter_results': {'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}}}, id='run-25ed88db-38f2-4b0c-a943-a03f217711a9-0')"
]
},
"execution_count": 4,
@@ -161,165 +101,95 @@
}
],
"source": [
"messages = [\n",
" (\n",
" \"system\",\n",
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
" ),\n",
" (\"human\", \"I love programming.\"),\n",
"]\n",
"ai_msg = llm.invoke(messages)\n",
"ai_msg"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"J'adore la programmation.\n"
]
}
],
"source": [
"print(ai_msg.content)"
]
},
{
"cell_type": "markdown",
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
"metadata": {},
"source": [
"## Chaining\n",
"\n",
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Ich liebe das Programmieren.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 26, 'total_tokens': 32}, 'model_name': 'gpt-35-turbo', 'system_fingerprint': None, 'prompt_filter_results': [{'prompt_index': 0, 'content_filter_results': {'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}}}], 'finish_reason': 'stop', 'logprobs': None, 'content_filter_results': {'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}}}, id='run-084967d7-06f2-441f-b5c1-477e2a9e9d03-0', usage_metadata={'input_tokens': 26, 'output_tokens': 6, 'total_tokens': 32})"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\n",
" \"system\",\n",
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
" ),\n",
" (\"human\", \"{input}\"),\n",
" ]\n",
"message = HumanMessage(\n",
" content=\"Translate this sentence from English to French. I love programming.\"\n",
")\n",
"\n",
"chain = prompt | llm\n",
"chain.invoke(\n",
" {\n",
" \"input_language\": \"English\",\n",
" \"output_language\": \"German\",\n",
" \"input\": \"I love programming.\",\n",
" }\n",
")"
"model.invoke([message])"
]
},
{
"cell_type": "markdown",
"id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
"id": "f27fa24d",
"metadata": {},
"source": [
"## Specifying model version\n",
"\n",
"Azure OpenAI responses contain `model_name` response metadata property, which is name of the model used to generate the response. However unlike native OpenAI responses, it does not contain the specific version of the model, which is set on the deployment in Azure. E.g. it does not distinguish between `gpt-35-turbo-0125` and `gpt-35-turbo-0301`. This makes it tricky to know which version of the model was used to generate the response, which as result can lead to e.g. wrong total cost calculation with `OpenAICallbackHandler`.\n",
"## Model Version\n",
"Azure OpenAI responses contain `model` property, which is name of the model used to generate the response. However unlike native OpenAI responses, it does not contain the version of the model, which is set on the deployment in Azure. This makes it tricky to know which version of the model was used to generate the response, which as result can lead to e.g. wrong total cost calculation with `OpenAICallbackHandler`.\n",
"\n",
"To solve this problem, you can pass `model_version` parameter to `AzureChatOpenAI` class, which will be added to the model name in the llm output. This way you can easily distinguish between different versions of the model."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "04b36e75-e8b7-4721-899e-76301ac2ecd9",
"execution_count": 5,
"id": "0531798a",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-community"
"from langchain_community.callbacks import get_openai_callback"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "84c411b0-1790-4798-8bb7-47d8ece4c2dc",
"metadata": {},
"execution_count": 7,
"id": "aceddb72",
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Total Cost (USD): $0.000063\n"
"Total Cost (USD): $0.000041\n"
]
}
],
"source": [
"from langchain_community.callbacks import get_openai_callback\n",
"\n",
"model = AzureChatOpenAI(\n",
" openai_api_version=os.environ[\"AZURE_OPENAI_API_VERSION\"],\n",
" azure_deployment=os.environ[\n",
" \"AZURE_OPENAI_CHAT_DEPLOYMENT_NAME\"\n",
" ], # in Azure, this deployment has version 0613 - input and output tokens are counted separately\n",
")\n",
"with get_openai_callback() as cb:\n",
" llm.invoke(messages)\n",
" model.invoke([message])\n",
" print(\n",
" f\"Total Cost (USD): ${format(cb.total_cost, '.6f')}\"\n",
" ) # without specifying the model version, flat-rate 0.002 USD per 1k input and output tokens is used"
]
},
{
"cell_type": "markdown",
"id": "2e61eefd",
"metadata": {},
"source": [
"We can provide the model version to `AzureChatOpenAI` constructor. It will get appended to the model name returned by Azure OpenAI and cost will be counted correctly."
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "21234693-d92b-4d69-8a7f-55aa062084bf",
"execution_count": 11,
"id": "8d5e54e9",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Total Cost (USD): $0.000078\n"
"Total Cost (USD): $0.000044\n"
]
}
],
"source": [
"llm_0301 = AzureChatOpenAI(\n",
" azure_deployment=\"YOUR-DEPLOYMENT\",\n",
" api_version=\"2024-05-01-preview\",\n",
"model0301 = AzureChatOpenAI(\n",
" openai_api_version=os.environ[\"AZURE_OPENAI_API_VERSION\"],\n",
" azure_deployment=os.environ[\"AZURE_OPENAI_CHAT_DEPLOYMENT_NAME\"],\n",
" model_version=\"0301\",\n",
")\n",
"with get_openai_callback() as cb:\n",
" llm_0301.invoke(messages)\n",
" model0301.invoke([message])\n",
" print(f\"Total Cost (USD): ${format(cb.total_cost, '.6f')}\")"
]
},
{
"cell_type": "markdown",
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all AzureChatOpenAI features and configurations head to the API reference: https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.azure.AzureChatOpenAI.html"
]
}
],
"metadata": {
@@ -338,7 +208,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
"version": "3.11.4"
}
},
"nbformat": 4,

View File

@@ -2,125 +2,86 @@
"cells": [
{
"cell_type": "raw",
"id": "afaf8039",
"metadata": {},
"id": "fbc66410",
"metadata": {
"vscode": {
"languageId": "raw"
}
},
"source": [
"---\n",
"sidebar_label: AWS Bedrock\n",
"sidebar_label: Bedrock\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "e49f1e0d",
"id": "bf733a38-db84-4363-89e2-de6735c37230",
"metadata": {},
"source": [
"# ChatBedrock\n",
"\n",
"This doc will help you get started with AWS Bedrock [chat models](/docs/concepts/#chat-models). Amazon Bedrock is a fully managed service that offers a choice of high-performing foundation models (FMs) from leading AI companies like AI21 Labs, Anthropic, Cohere, Meta, Stability AI, and Amazon via a single API, along with a broad set of capabilities you need to build generative AI applications with security, privacy, and responsible AI. Using Amazon Bedrock, you can easily experiment with and evaluate top FMs for your use case, privately customize them with your data using techniques such as fine-tuning and Retrieval Augmented Generation (RAG), and build agents that execute tasks using your enterprise systems and data sources. Since Amazon Bedrock is serverless, you don't have to manage any infrastructure, and you can securely integrate and deploy generative AI capabilities into your applications using the AWS services you are already familiar with.\n",
"\n",
"For more information on which models are accessible via Bedrock, head to the [AWS docs](https://docs.aws.amazon.com/bedrock/latest/userguide/models-features.html).\n",
"\n",
"For detailed documentation of all ChatBedrock features and configurations head to the [API reference](https://api.python.langchain.com/en/latest/chat_models/langchain_aws.chat_models.bedrock.ChatBedrock.html).\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/v0.2/docs/integrations/chat/bedrock) | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
"| [ChatBedrock](https://api.python.langchain.com/en/latest/chat_models/langchain_aws.chat_models.bedrock.ChatBedrock.html) | [langchain-aws](https://api.python.langchain.com/en/latest/aws_api_reference.html) | ❌ | beta | ✅ | ![PyPI - Downloads](https://img.shields.io/pypi/dm/langchain-aws?style=flat-square&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/langchain-aws?style=flat-square&label=%20) |\n",
"\n",
"### Model features\n",
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ❌ | ✅ | ❌ | ❌ | ✅ | ❌ | ✅ | ❌ | \n",
"\n",
"## Setup\n",
"\n",
"To access Bedrock models you'll need to create an AWS account, set up the Bedrock API service, get an access key ID and secret key, and install the `langchain-aws` integration package.\n",
"\n",
"### Credentials\n",
"\n",
"Head to the [AWS docs](https://docs.aws.amazon.com/bedrock/latest/userguide/setting-up.html) to sign up to AWS and setup your credentials. You'll also need to turn on model access for your account, which you can do by following [these instructions](https://docs.aws.amazon.com/bedrock/latest/userguide/model-access.html)."
]
},
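{
"cell_type": "markdown",
"id": "aws-credentials-sketch",
"metadata": {},
"source": [
"As a minimal sketch (not part of the original guide), you can export your credentials in-notebook. `langchain-aws` uses `boto3` under the hood, which typically reads the standard `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and `AWS_DEFAULT_REGION` environment variables or your shared AWS config/profile; the region below is only an example, and you can skip this cell entirely if you already authenticate via a profile or IAM role."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "aws-credentials-setup",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"# Standard AWS environment variables picked up by boto3 (and thus langchain-aws).\n",
"# Skip this cell if you already authenticate via an AWS profile or IAM role.\n",
"os.environ[\"AWS_ACCESS_KEY_ID\"] = getpass.getpass(\"Enter your AWS access key ID: \")\n",
"os.environ[\"AWS_SECRET_ACCESS_KEY\"] = getpass.getpass(\"Enter your AWS secret access key: \")\n",
"os.environ[\"AWS_DEFAULT_REGION\"] = \"us-east-1\"  # example region"
]
},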
{
"cell_type": "markdown",
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
"metadata": {},
"source": [
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
">[Amazon Bedrock](https://aws.amazon.com/bedrock/) is a fully managed service that offers a choice of \n",
"> high-performing foundation models (FMs) from leading AI companies like `AI21 Labs`, `Anthropic`, `Cohere`, \n",
"> `Meta`, `Stability AI`, and `Amazon` via a single API, along with a broad set of capabilities you need to \n",
"> build generative AI applications with security, privacy, and responsible AI. Using `Amazon Bedrock`, \n",
"> you can easily experiment with and evaluate top FMs for your use case, privately customize them with \n",
"> your data using techniques such as fine-tuning and `Retrieval Augmented Generation` (`RAG`), and build \n",
"> agents that execute tasks using your enterprise systems and data sources. Since `Amazon Bedrock` is \n",
"> serverless, you don't have to manage any infrastructure, and you can securely integrate and deploy \n",
"> generative AI capabilities into your applications using the AWS services you are already familiar with.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
"execution_count": 2,
"id": "d51edc81",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
]
},
{
"cell_type": "markdown",
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"The LangChain Bedrock integration lives in the `langchain-aws` package:"
"%pip install --upgrade --quiet langchain-aws"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-aws"
]
},
{
"cell_type": "markdown",
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our model object and generate chat completions:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
"metadata": {},
"execution_count": 1,
"id": "d4a7c55d-b235-4ca4-a579-c90cc9570da9",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain_aws import ChatBedrock\n",
"\n",
"llm = ChatBedrock(\n",
"from langchain_core.messages import HumanMessage"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "70cf04e8-423a-4ff6-8b09-f11fb711c817",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"chat = ChatBedrock(\n",
" model_id=\"anthropic.claude-3-sonnet-20240229-v1:0\",\n",
" model_kwargs=dict(temperature=0),\n",
" # other params...\n",
" model_kwargs={\"temperature\": 0.1},\n",
")"
]
},
{
"cell_type": "markdown",
"id": "2b4f3e15",
"metadata": {},
"source": [
"## Invocation"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "62e0dbc3",
"execution_count": 12,
"id": "8199ef8f-eb8b-4253-9ea0-6c24a013ca4c",
"metadata": {
"tags": []
},
@@ -128,30 +89,38 @@
{
"data": {
"text/plain": [
"AIMessage(content=\"Voici la traduction en français :\\n\\nJ'aime la programmation.\", additional_kwargs={'usage': {'prompt_tokens': 29, 'completion_tokens': 21, 'total_tokens': 50}, 'stop_reason': 'end_turn', 'model_id': 'anthropic.claude-3-sonnet-20240229-v1:0'}, response_metadata={'usage': {'prompt_tokens': 29, 'completion_tokens': 21, 'total_tokens': 50}, 'stop_reason': 'end_turn', 'model_id': 'anthropic.claude-3-sonnet-20240229-v1:0'}, id='run-fdb07dc3-ff72-430d-b22b-e7824b15c766-0', usage_metadata={'input_tokens': 29, 'output_tokens': 21, 'total_tokens': 50})"
"AIMessage(content=\"Voici la traduction en français :\\n\\nJ'aime la programmation.\", additional_kwargs={'usage': {'prompt_tokens': 20, 'completion_tokens': 21, 'total_tokens': 41}}, response_metadata={'model_id': 'anthropic.claude-3-sonnet-20240229-v1:0', 'usage': {'prompt_tokens': 20, 'completion_tokens': 21, 'total_tokens': 41}}, id='run-994f0362-0e50-4524-afad-3c4f5bb11328-0')"
]
},
"execution_count": 5,
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"messages = [\n",
" (\n",
" \"system\",\n",
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
" ),\n",
" (\"human\", \"I love programming.\"),\n",
" HumanMessage(\n",
" content=\"Translate this sentence from English to French. I love programming.\"\n",
" )\n",
"]\n",
"ai_msg = llm.invoke(messages)\n",
"ai_msg"
"chat.invoke(messages)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "a4a4f4d4",
"metadata": {},
"source": [
"### Streaming\n",
"\n",
"To stream responses, you can use the runnable `.stream()` method."
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
"execution_count": 14,
"id": "d9e52838",
"metadata": {},
"outputs": [
{
@@ -160,124 +129,84 @@
"text": [
"Voici la traduction en français :\n",
"\n",
"J'aime la programmation.\n"
"J'aime la programmation."
]
}
],
"source": [
"print(ai_msg.content)"
"for chunk in chat.stream(messages):\n",
" print(chunk.content, end=\"\", flush=True)"
]
},
{
"cell_type": "markdown",
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
"id": "c36575b3",
"metadata": {},
"source": [
"## Chaining\n",
"### LLM Caching with OpenSearch Semantic Cache\n",
"\n",
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
"Use OpenSearch as a semantic cache to cache prompts and responses and evaluate hits based on semantic similarity.\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
"execution_count": null,
"id": "375d4e56",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Ich liebe Programmieren.', additional_kwargs={'usage': {'prompt_tokens': 23, 'completion_tokens': 11, 'total_tokens': 34}, 'stop_reason': 'end_turn', 'model_id': 'anthropic.claude-3-sonnet-20240229-v1:0'}, response_metadata={'usage': {'prompt_tokens': 23, 'completion_tokens': 11, 'total_tokens': 34}, 'stop_reason': 'end_turn', 'model_id': 'anthropic.claude-3-sonnet-20240229-v1:0'}, id='run-5ad005ce-9f31-4670-baa0-9373d418698a-0', usage_metadata={'input_tokens': 23, 'output_tokens': 11, 'total_tokens': 34})"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain.globals import set_llm_cache\n",
"from langchain_aws import BedrockEmbeddings, ChatBedrock\n",
"from langchain_community.cache import OpenSearchSemanticCache\n",
"from langchain_core.messages import HumanMessage\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\n",
" \"system\",\n",
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
" ),\n",
" (\"human\", \"{input}\"),\n",
" ]\n",
"bedrock_embeddings = BedrockEmbeddings(\n",
" model_id=\"amazon.titan-embed-text-v1\", region_name=\"us-east-1\"\n",
")\n",
"\n",
"chain = prompt | llm\n",
"chain.invoke(\n",
" {\n",
" \"input_language\": \"English\",\n",
" \"output_language\": \"German\",\n",
" \"input\": \"I love programming.\",\n",
" }\n",
"chat = ChatBedrock(\n",
" model_id=\"anthropic.claude-3-haiku-20240307-v1:0\", model_kwargs={\"temperature\": 0.5}\n",
")\n",
"\n",
"# Enable LLM cache. Make sure OpenSearch is set up and running. Update URL accordingly.\n",
"set_llm_cache(\n",
" OpenSearchSemanticCache(\n",
" opensearch_url=\"http://localhost:9200\", embedding=bedrock_embeddings\n",
" )\n",
")"
]
},
{
"cell_type": "markdown",
"id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
"cell_type": "code",
"execution_count": null,
"id": "bb5d25bb",
"metadata": {},
"outputs": [],
"source": [
"## ***Beta***: Bedrock Converse API\n",
"%%time\n",
"# The first time, it is not yet in cache, so it should take longer\n",
"messages = [HumanMessage(content=\"tell me about Amazon Bedrock\")]\n",
"response_text = chat.invoke(messages)\n",
"\n",
"AWS has recently recently the Bedrock Converse API which provides a unified conversational interface for Bedrock models. This API does not yet support custom models. You can see a list of all [models that are supported here](https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html). To improve reliability the ChatBedrock integration will switch to using the Bedrock Converse API as soon as it has feature parity with the existing Bedrock API. Until then a separate [ChatBedrockConverse](https://api.python.langchain.com/en/latest/chat_models/langchain_aws.chat_models.bedrock_converse.ChatBedrockConverse.html#langchain_aws.chat_models.bedrock_converse.ChatBedrockConverse) integration has been released in beta for users who do not need to use custom models.\n",
"\n",
"You can use it like so:"
"print(response_text)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "ae728e59-94d4-40cf-9d24-25ad8723fc59",
"execution_count": null,
"id": "6cfb3086",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/bagatur/langchain/libs/core/langchain_core/_api/beta_decorator.py:87: LangChainBetaWarning: The class `ChatBedrockConverse` is in beta. It is actively being worked on, so the API may change.\n",
" warn_beta(\n"
]
},
{
"data": {
"text/plain": [
"AIMessage(content=\"Voici la traduction en français :\\n\\nJ'aime la programmation.\", response_metadata={'ResponseMetadata': {'RequestId': '122fb1c8-c3c5-4b06-941e-c95d210bfbc7', 'HTTPStatusCode': 200, 'HTTPHeaders': {'date': 'Mon, 01 Jul 2024 21:48:25 GMT', 'content-type': 'application/json', 'content-length': '243', 'connection': 'keep-alive', 'x-amzn-requestid': '122fb1c8-c3c5-4b06-941e-c95d210bfbc7'}, 'RetryAttempts': 0}, 'stopReason': 'end_turn', 'metrics': {'latencyMs': 830}}, id='run-0e3df22f-fcd8-4fbb-a4fb-565227e7e430-0', usage_metadata={'input_tokens': 29, 'output_tokens': 21, 'total_tokens': 50})"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"outputs": [],
"source": [
"from langchain_aws import ChatBedrockConverse\n",
"%%time\n",
"# The second time, while not a direct hit, the question is semantically similar to the original question,\n",
"# so it uses the cached result!\n",
"\n",
"llm = ChatBedrockConverse(\n",
" model=\"anthropic.claude-3-sonnet-20240229-v1:0\",\n",
" temperature=0,\n",
" max_tokens=None,\n",
" # other params...\n",
")\n",
"messages = [HumanMessage(content=\"what is amazon bedrock\")]\n",
"response_text = chat.invoke(messages)\n",
"\n",
"llm.invoke(messages)"
]
},
{
"cell_type": "markdown",
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all ChatBedrock features and configurations head to the API reference: https://api.python.langchain.com/en/latest/chat_models/langchain_aws.chat_models.bedrock.ChatBedrock.html\n",
"\n",
"For detailed documentation of all ChatBedrockConverse features and configurations head to the API reference: https://api.python.langchain.com/en/latest/chat_models/langchain_aws.chat_models.bedrock_converse.ChatBedrockConverse.html"
"print(response_text)"
]
}
],
@@ -297,7 +226,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
"version": "3.11.4"
}
},
"nbformat": 4,

View File

@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "raw",
"id": "afaf8039",
"id": "53fbf15f",
"metadata": {},
"source": [
"---\n",
@@ -12,129 +12,103 @@
},
{
"cell_type": "markdown",
"id": "e49f1e0d",
"id": "bf733a38-db84-4363-89e2-de6735c37230",
"metadata": {},
"source": [
"# ChatCohere\n",
"# Cohere\n",
"\n",
"This doc will help you get started with Cohere [chat models](/docs/concepts/#chat-models). For detailed documentation of all ChatCohere features and configurations head to the [API reference](https://api.python.langchain.com/en/latest/chat_models/langchain_cohere.chat_models.ChatCohere.html).\n",
"\n",
"For an overview of all Cohere models head to the [Cohere docs](https://docs.cohere.com/docs/models).\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/v0.2/docs/integrations/chat/cohere) | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
"| [ChatCohere](https://api.python.langchain.com/en/latest/chat_models/langchain_cohere.chat_models.ChatCohere.html) | [langchain-cohere](https://api.python.langchain.com/en/latest/cohere_api_reference.html) | ❌ | beta | ✅ | ![PyPI - Downloads](https://img.shields.io/pypi/dm/langchain-cohere?style=flat-square&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/langchain-cohere?style=flat-square&label=%20) |\n",
"\n",
"### Model features\n",
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ✅ | ✅ | ❌ | ❌ | \n",
"This notebook covers how to get started with [Cohere chat models](https://cohere.com/chat).\n",
"\n",
"Head to the [API reference](https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.cohere.ChatCohere.html) for detailed documentation of all attributes and methods."
]
},
{
"cell_type": "markdown",
"id": "3607d67e-e56c-4102-bbba-df2edc0e109e",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"To access Cohere models you'll need to create a Cohere account, get an API key, and install the `langchain-cohere` integration package.\n",
"The integration lives in the `langchain-cohere` package. We can install these with:\n",
"\n",
"### Credentials\n",
"```bash\n",
"pip install -U langchain-cohere\n",
"```\n",
"\n",
"Head to https://dashboard.cohere.com/welcome/login to sign up to Cohere and generate an API key. Once you've done this set the COHERE_API_KEY environment variable:"
"We'll also need to get a [Cohere API key](https://cohere.com/) and set the `COHERE_API_KEY` environment variable:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
"execution_count": 11,
"id": "2108b517-1e8d-473d-92fa-4f930e8072a7",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"COHERE_API_KEY\"] = getpass.getpass(\"Enter your Cohere API key: \")"
"os.environ[\"COHERE_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "markdown",
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
"id": "cf690fbb",
"metadata": {},
"source": [
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
"It's also helpful (but not needed) to set up [LangSmith](https://smith.langchain.com/) for best-in-class observability"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
"execution_count": 12,
"id": "7f11de02",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
"# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "markdown",
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
"id": "4c26754b-b3c9-4d93-8f36-43049bd943bf",
"metadata": {},
"source": [
"### Installation\n",
"## Usage\n",
"\n",
"The LangChain Cohere integration lives in the `langchain-cohere` package:"
"ChatCohere supports all [ChatModel](/docs/how_to#chat-models) functionality:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-cohere"
]
},
{
"cell_type": "markdown",
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our model object and generate chat completions:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
"metadata": {},
"execution_count": 13,
"id": "d4a7c55d-b235-4ca4-a579-c90cc9570da9",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain_cohere import ChatCohere\n",
"\n",
"llm = ChatCohere(\n",
" model=\"command-r-plus\",\n",
" temperature=0,\n",
" max_tokens=None,\n",
" timeout=None,\n",
" max_retries=2,\n",
" # other params...\n",
")"
]
},
{
"cell_type": "markdown",
"id": "2b4f3e15",
"metadata": {},
"source": [
"## Invocation"
"from langchain_core.messages import HumanMessage"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "62e0dbc3",
"execution_count": 14,
"id": "70cf04e8-423a-4ff6-8b09-f11fb711c817",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"chat = ChatCohere(model=\"command\")"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "8199ef8f-eb8b-4253-9ea0-6c24a013ca4c",
"metadata": {
"tags": []
},
@@ -142,110 +116,134 @@
{
"data": {
"text/plain": [
"AIMessage(content=\"J'adore programmer.\", additional_kwargs={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': 'd84f80f3-4611-46e6-aed0-9d8665a20a11', 'token_count': {'input_tokens': 89, 'output_tokens': 5}}, response_metadata={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': 'd84f80f3-4611-46e6-aed0-9d8665a20a11', 'token_count': {'input_tokens': 89, 'output_tokens': 5}}, id='run-514ab516-ed7e-48ac-b132-2598fb80ebef-0')"
"AIMessage(content='4 && 5 \\n6 || 7 \\n\\nWould you like to play a game of odds and evens?', additional_kwargs={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': '2076b614-52b3-4082-a259-cc92cd3d9fea', 'token_count': {'prompt_tokens': 68, 'response_tokens': 23, 'total_tokens': 91, 'billed_tokens': 77}}, response_metadata={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': '2076b614-52b3-4082-a259-cc92cd3d9fea', 'token_count': {'prompt_tokens': 68, 'response_tokens': 23, 'total_tokens': 91, 'billed_tokens': 77}}, id='run-3475e0c8-c89b-4937-9300-e07d652455e1-0')"
]
},
"execution_count": 2,
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"messages = [\n",
" (\n",
" \"system\",\n",
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
" ),\n",
" (\"human\", \"I love programming.\"),\n",
"]\n",
"ai_msg = llm.invoke(messages)\n",
"ai_msg"
"messages = [HumanMessage(content=\"1\"), HumanMessage(content=\"2 3\")]\n",
"chat.invoke(messages)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
"metadata": {},
"execution_count": 16,
"id": "c5fac0e9-05a4-4fc1-a3b3-e5bbb24b971b",
"metadata": {
"tags": []
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='4 && 5', additional_kwargs={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': 'f0708a92-f874-46ee-9b93-334d616ad92e', 'token_count': {'prompt_tokens': 68, 'response_tokens': 3, 'total_tokens': 71, 'billed_tokens': 57}}, response_metadata={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': 'f0708a92-f874-46ee-9b93-334d616ad92e', 'token_count': {'prompt_tokens': 68, 'response_tokens': 3, 'total_tokens': 71, 'billed_tokens': 57}}, id='run-1635e63e-2994-4e7f-986e-152ddfc95777-0')"
]
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"await chat.ainvoke(messages)"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "025be980-e50d-4a68-93dc-c9c7b500ce34",
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"J'adore programmer.\n"
"4 && 5"
]
}
],
"source": [
"print(ai_msg.content)"
]
},
{
"cell_type": "markdown",
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
"metadata": {},
"source": [
"## Chaining\n",
"\n",
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
"for chunk in chat.stream(messages):\n",
" print(chunk.content, end=\"\", flush=True)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
"execution_count": 18,
"id": "064288e4-f184-4496-9427-bcf148fa055e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Ich liebe Programmierung.', additional_kwargs={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': '053bebde-4e1d-4d06-8ee6-3446e7afa25e', 'token_count': {'input_tokens': 84, 'output_tokens': 6}}, response_metadata={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': '053bebde-4e1d-4d06-8ee6-3446e7afa25e', 'token_count': {'input_tokens': 84, 'output_tokens': 6}}, id='run-53700708-b7fb-417b-af36-1a6fcde38e7d-0')"
"[AIMessage(content='4 && 5', additional_kwargs={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': '6770ca86-f6c3-4ba3-a285-c4772160612f', 'token_count': {'prompt_tokens': 68, 'response_tokens': 3, 'total_tokens': 71, 'billed_tokens': 57}}, response_metadata={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': '6770ca86-f6c3-4ba3-a285-c4772160612f', 'token_count': {'prompt_tokens': 68, 'response_tokens': 3, 'total_tokens': 71, 'billed_tokens': 57}}, id='run-8d6fade2-1b39-4e31-ab23-4be622dd0027-0')]"
]
},
"execution_count": 4,
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\n",
" \"system\",\n",
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
" ),\n",
" (\"human\", \"{input}\"),\n",
" ]\n",
")\n",
"\n",
"chain = prompt | llm\n",
"chain.invoke(\n",
" {\n",
" \"input_language\": \"English\",\n",
" \"output_language\": \"German\",\n",
" \"input\": \"I love programming.\",\n",
" }\n",
")"
"chat.batch([messages])"
]
},
{
"cell_type": "markdown",
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
"id": "f1c56460",
"metadata": {},
"source": [
"## API reference\n",
"## Chaining\n",
"\n",
"For detailed documentation of all ChatCohere features and configurations head to the API reference: https://api.python.langchain.com/en/latest/chat_models/langchain_cohere.chat_models.ChatCohere.html"
"You can also easily combine with a prompt template for easy structuring of user input. We can do this using [LCEL](/docs/concepts#langchain-expression-language-lcel)"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "0851b103",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate.from_template(\"Tell me a joke about {topic}\")\n",
"chain = prompt | chat"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "ae950c0f-1691-47f1-b609-273033cae707",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='What color socks do bears wear?\\n\\nThey dont wear socks, they have bear feet. \\n\\nHope you laughed! If not, maybe this will help: laughter is the best medicine, and a good sense of humor is infectious!', additional_kwargs={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': '6edccf44-9bc8-4139-b30e-13b368f3563c', 'token_count': {'prompt_tokens': 68, 'response_tokens': 51, 'total_tokens': 119, 'billed_tokens': 108}}, response_metadata={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': '6edccf44-9bc8-4139-b30e-13b368f3563c', 'token_count': {'prompt_tokens': 68, 'response_tokens': 51, 'total_tokens': 119, 'billed_tokens': 108}}, id='run-ef7f9789-0d4d-43bf-a4f7-f2a0e27a5320-0')"
]
},
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke({\"topic\": \"bears\"})"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "poetry-venv-2",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "poetry-venv-2"
"name": "python3"
},
"language_info": {
"codemirror_mode": {
@@ -257,7 +255,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
"version": "3.11.7"
}
},
"nbformat": 4,

View File

@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "raw",
"id": "afaf8039",
"id": "529aeba9",
"metadata": {},
"source": [
"---\n",
@@ -11,236 +11,190 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "e49f1e0d",
"id": "642fd21c-600a-47a1-be96-6e1438b421a9",
"metadata": {},
"source": [
"# ChatFireworks\n",
"\n",
"This doc help you get started with Fireworks AI [chat models](/docs/concepts/#chat-models). For detailed documentation of all ChatFireworks features and configurations head to the [API reference](https://api.python.langchain.com/en/latest/chat_models/langchain_fireworks.chat_models.ChatFireworks.html).\n",
">[Fireworks](https://app.fireworks.ai/) accelerates product development on generative AI by creating an innovative AI experiment and production platform. \n",
"\n",
"Fireworks AI is an AI inference platform to run and customize models. For a list of all models served by Fireworks see the [Fireworks docs](https://fireworks.ai/models).\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/v0.2/docs/integrations/chat/fireworks) | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
"| [ChatFireworks](https://api.python.langchain.com/en/latest/chat_models/langchain_fireworks.chat_models.ChatFireworks.html) | [langchain-fireworks](https://api.python.langchain.com/en/latest/fireworks_api_reference.html) | ❌ | beta | ✅ | ![PyPI - Downloads](https://img.shields.io/pypi/dm/langchain-fireworks?style=flat-square&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/langchain-fireworks?style=flat-square&label=%20) |\n",
"\n",
"### Model features\n",
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ | \n",
"\n",
"## Setup\n",
"\n",
"To access Fireworks models you'll need to create a Fireworks account, get an API key, and install the `langchain-fireworks` integration package.\n",
"\n",
"### Credentials\n",
"\n",
"Head to (ttps://fireworks.ai/login to sign up to Fireworks and generate an API key. Once you've done this set the FIREWORKS_API_KEY environment variable:"
"This example goes over how to use LangChain to interact with `ChatFireworks` models."
]
},
{
"cell_type": "raw",
"id": "4a7c795e",
"metadata": {},
"source": [
"%pip install langchain-fireworks"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
"execution_count": 1,
"id": "d00d850917865298",
"metadata": {
"collapsed": false,
"jupyter": {
"outputs_hidden": false
}
},
"outputs": [],
"source": [
"from langchain_core.messages import HumanMessage, SystemMessage\n",
"from langchain_fireworks import ChatFireworks"
]
},
{
"cell_type": "markdown",
"id": "f28ebf8b-f14f-46c7-9962-8b8dc42e31be",
"metadata": {},
"source": [
"# Setup\n",
"\n",
"1. Make sure the `langchain-fireworks` package is installed in your environment.\n",
"2. Sign in to [Fireworks AI](http://fireworks.ai) for the an API Key to access our models, and make sure it is set as the `FIREWORKS_API_KEY` environment variable.\n",
"3. Set up your model using a model id. If the model is not set, the default model is fireworks-llama-v2-7b-chat. See the full, most up-to-date model list on [app.fireworks.ai](https://app.fireworks.ai)."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "d096fb14-8acc-4047-9cd0-c842430c3a1d",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"FIREWORKS_API_KEY\"] = getpass.getpass(\"Enter your Fireworks API key: \")"
"if \"FIREWORKS_API_KEY\" not in os.environ:\n",
" os.environ[\"FIREWORKS_API_KEY\"] = getpass.getpass(\"Fireworks API Key:\")\n",
"\n",
"# Initialize a Fireworks chat model\n",
"chat = ChatFireworks(model=\"accounts/fireworks/models/mixtral-8x7b-instruct\")"
]
},
{
"cell_type": "markdown",
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
"id": "d8f13144-37cf-47a5-b5a0-e3cdf76d9a72",
"metadata": {},
"source": [
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
]
},
{
"cell_type": "markdown",
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
"metadata": {},
"source": [
"### Installation\n",
"# Calling the Model Directly\n",
"\n",
"The LangChain Fireworks integration lives in the `langchain-fireworks` package:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-fireworks"
]
},
{
"cell_type": "markdown",
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our model object and generate chat completions:\n",
"\n",
"- TODO: Update model instantiation with relevant params."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
"metadata": {},
"outputs": [],
"source": [
"from langchain_fireworks import ChatFireworks\n",
"\n",
"llm = ChatFireworks(\n",
" model=\"accounts/fireworks/models/llama-v3-70b-instruct\",\n",
" temperature=0,\n",
" max_tokens=None,\n",
" timeout=None,\n",
" max_retries=2,\n",
" # other params...\n",
")"
]
},
{
"cell_type": "markdown",
"id": "2b4f3e15",
"metadata": {},
"source": [
"## Invocation"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "62e0dbc3",
"metadata": {
"tags": []
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"J'adore la programmation.\", response_metadata={'token_usage': {'prompt_tokens': 35, 'total_tokens': 44, 'completion_tokens': 9}, 'model_name': 'accounts/fireworks/models/llama-v3-70b-instruct', 'system_fingerprint': '', 'finish_reason': 'stop', 'logprobs': None}, id='run-df28e69a-ff30-457e-a743-06eb14d01cb0-0', usage_metadata={'input_tokens': 35, 'output_tokens': 9, 'total_tokens': 44})"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"messages = [\n",
" (\n",
" \"system\",\n",
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
" ),\n",
" (\"human\", \"I love programming.\"),\n",
"]\n",
"ai_msg = llm.invoke(messages)\n",
"ai_msg"
"You can call the model directly with a system and human message to get answers."
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
"id": "72340871-ae2f-415f-b399-0777d32dc379",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"Hello! I'm an AI language model, a helpful assistant designed to chat and assist you with any questions or information you might need. I'm here to make your experience as smooth and enjoyable as possible. How can I assist you today?\")"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# ChatFireworks Wrapper\n",
"system_message = SystemMessage(content=\"You are to chat with the user.\")\n",
"human_message = HumanMessage(content=\"Who are you?\")\n",
"\n",
"chat.invoke([system_message, human_message])"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "68c6b1fa-2ff7-4a63-8d88-3cec302180b8",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"I'm an AI and do not have the ability to experience the weather firsthand. However,\")"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Setting additional parameters: temperature, max_tokens, top_p\n",
"chat = ChatFireworks(\n",
" model=\"accounts/fireworks/models/mixtral-8x7b-instruct\",\n",
" temperature=1,\n",
" max_tokens=20,\n",
")\n",
"system_message = SystemMessage(content=\"You are to chat with the user.\")\n",
"human_message = HumanMessage(content=\"How's the weather today?\")\n",
"chat.invoke([system_message, human_message])"
]
},
{
"cell_type": "markdown",
"id": "8c44cb36",
"metadata": {},
"source": [
"# Tool Calling\n",
"\n",
"Fireworks offers the `FireFunction-v2` tool calling model. You can use it for structured output and function calling use cases:"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "ee2db682",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"J'adore la programmation.\n"
"{'function': {'arguments': '{\"name\": \"Erick\", \"age\": 27}',\n",
" 'name': 'ExtractFields'},\n",
" 'id': 'call_J0WYP2TLenaFw3UeVU0UnWqx',\n",
" 'index': 0,\n",
" 'type': 'function'}\n"
]
}
],
"source": [
"print(ai_msg.content)"
]
},
{
"cell_type": "markdown",
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
"metadata": {},
"source": [
"## Chaining\n",
"from pprint import pprint\n",
"\n",
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
"from langchain_core.pydantic_v1 import BaseModel\n",
"\n",
"\n",
"class ExtractFields(BaseModel):\n",
" name: str\n",
" age: int\n",
"\n",
"\n",
"chat = ChatFireworks(\n",
" model=\"accounts/fireworks/models/firefunction-v2\",\n",
").bind_tools([ExtractFields])\n",
"\n",
"result = chat.invoke(\"I am a 27 year old named Erick\")\n",
"\n",
"pprint(result.additional_kwargs[\"tool_calls\"][0])"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
"execution_count": null,
"id": "2321a4e6",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Ich liebe das Programmieren.', response_metadata={'token_usage': {'prompt_tokens': 30, 'total_tokens': 37, 'completion_tokens': 7}, 'model_name': 'accounts/fireworks/models/llama-v3-70b-instruct', 'system_fingerprint': '', 'finish_reason': 'stop', 'logprobs': None}, id='run-ff3f91ad-ed81-4acf-9f59-7490dc8d8f48-0', usage_metadata={'input_tokens': 30, 'output_tokens': 7, 'total_tokens': 37})"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\n",
" \"system\",\n",
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
" ),\n",
" (\"human\", \"{input}\"),\n",
" ]\n",
")\n",
"\n",
"chain = prompt | llm\n",
"chain.invoke(\n",
" {\n",
" \"input_language\": \"English\",\n",
" \"output_language\": \"German\",\n",
" \"input\": \"I love programming.\",\n",
" }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all ChatFireworks features and configurations head to the API reference: https://api.python.langchain.com/en/latest/chat_models/langchain_fireworks.chat_models.ChatFireworks.html"
]
"outputs": [],
"source": []
}
],
"metadata": {
@@ -259,7 +213,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
"version": "3.11.4"
}
},
"nbformat": 4,

File diff suppressed because one or more lines are too long

View File

@@ -1,585 +0,0 @@
{
"cells": [
{
"cell_type": "raw",
"id": "1c95cd76",
"metadata": {
"vscode": {
"languageId": "raw"
}
},
"source": [
"---\n",
"sidebar_label: IBM watsonx.ai\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "70996d8a",
"metadata": {},
"source": [
"# ChatWatsonx\n",
"\n",
">ChatWatsonx is a wrapper for IBM [watsonx.ai](https://www.ibm.com/products/watsonx-ai) foundation models.\n",
"\n",
"The aim of these examples is to show how to communicate with `watsonx.ai` models using `LangChain` LLMs API."
]
},
{
"cell_type": "markdown",
"id": "ef7b088a",
"metadata": {},
"source": [
"## Overview\n",
"\n",
"### Integration details\n",
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/v0.2/docs/integrations/chat/openai) | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
"| [ChatWatsonx](https://api.python.langchain.com/en/latest/ibm_api_reference.html) | [langchain-ibm](https://api.python.langchain.com/en/latest/ibm_api_reference.html) | ❌ | ❌ | ❌ | ![PyPI - Downloads](https://img.shields.io/pypi/dm/langchain-ibm?style=flat-square&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/langchain-ibm?style=flat-square&label=%20) |\n",
"\n",
"### Model features\n",
"| [Tool calling](/docs/how_to/tool_calling/) | [Structured output](/docs/how_to/structured_output/) | JSON mode | Image input | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | ✅ | ❌ | "
]
},
{
"cell_type": "markdown",
"id": "f406e092",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"To access IBM watsonx.ai models you'll need to create an IBM watsonx.ai account, get an API key, and install the `langchain-ibm` integration package.\n",
"\n",
"### Credentials\n",
"\n",
"The cell below defines the credentials required to work with watsonx Foundation Model inferencing.\n",
"\n",
"**Action:** Provide the IBM Cloud user API key. For details, see\n",
"[Managing user API keys](https://cloud.ibm.com/docs/account?topic=account-userapikey&interface=ui)."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "11d572a1",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from getpass import getpass\n",
"\n",
"watsonx_api_key = getpass()\n",
"os.environ[\"WATSONX_APIKEY\"] = watsonx_api_key"
]
},
{
"cell_type": "markdown",
"id": "c59782a7",
"metadata": {},
"source": [
"Additionally you are able to pass additional secrets as an environment variable. "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f98c573c",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"WATSONX_URL\"] = \"your service instance url\"\n",
"os.environ[\"WATSONX_TOKEN\"] = \"your token for accessing the CPD cluster\"\n",
"os.environ[\"WATSONX_PASSWORD\"] = \"your password for accessing the CPD cluster\"\n",
"os.environ[\"WATSONX_USERNAME\"] = \"your username for accessing the CPD cluster\"\n",
"os.environ[\"WATSONX_INSTANCE_ID\"] = \"your instance_id for accessing the CPD cluster\""
]
},
{
"cell_type": "markdown",
"id": "b3dc9176",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"The LangChain IBM integration lives in the `langchain-ibm` package:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "387eda86",
"metadata": {},
"outputs": [],
"source": [
"!pip install -qU langchain-ibm"
]
},
{
"cell_type": "markdown",
"id": "e36acbef",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"You might need to adjust model `parameters` for different models or tasks. For details, refer to [Available MetaNames](https://ibm.github.io/watsonx-ai-python-sdk/fm_model.html#metanames.GenTextParamsMetaNames)."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "407cd500",
"metadata": {},
"outputs": [],
"source": [
"parameters = {\n",
" \"decoding_method\": \"sample\",\n",
" \"max_new_tokens\": 100,\n",
" \"min_new_tokens\": 1,\n",
" \"stop_sequences\": [\".\"],\n",
"}"
]
},
{
"cell_type": "markdown",
"id": "2b586538",
"metadata": {},
"source": [
"Initialize the `WatsonxLLM` class with the previously set parameters.\n",
"\n",
"\n",
"**Note**: \n",
"\n",
"- To provide context for the API call, you must pass the `project_id` or `space_id`. To get your project or space ID, open your project or space, go to the **Manage** tab, and click **General**. For more information see: [Project documentation](https://www.ibm.com/docs/en/watsonx-as-a-service?topic=projects) or [Deployment space documentation](https://www.ibm.com/docs/en/watsonx/saas?topic=spaces-creating-deployment).\n",
"- Depending on the region of your provisioned service instance, use one of the urls listed in [watsonx.ai API Authentication](https://ibm.github.io/watsonx-ai-python-sdk/setup_cloud.html#authentication).\n",
"\n",
"In this example, well use the `project_id` and Dallas URL.\n",
"\n",
"\n",
"You need to specify the `model_id` that will be used for inferencing. You can find the list of all the available models in [Supported foundation models](https://ibm.github.io/watsonx-ai-python-sdk/fm_model.html#ibm_watsonx_ai.foundation_models.utils.enums.ModelTypes)."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "98371396",
"metadata": {},
"outputs": [],
"source": [
"from langchain_ibm import ChatWatsonx\n",
"\n",
"chat = ChatWatsonx(\n",
" model_id=\"ibm/granite-13b-chat-v2\",\n",
" url=\"https://us-south.ml.cloud.ibm.com\",\n",
" project_id=\"PASTE YOUR PROJECT_ID HERE\",\n",
" params=parameters,\n",
")"
]
},
{
"cell_type": "markdown",
"id": "2202f4e0",
"metadata": {},
"source": [
"Alternatively, you can use Cloud Pak for Data credentials. For details, see [watsonx.ai software setup](https://ibm.github.io/watsonx-ai-python-sdk/setup_cpd.html). "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "243ecccb",
"metadata": {},
"outputs": [],
"source": [
"chat = ChatWatsonx(\n",
" model_id=\"ibm/granite-13b-chat-v2\",\n",
" url=\"PASTE YOUR URL HERE\",\n",
" username=\"PASTE YOUR USERNAME HERE\",\n",
" password=\"PASTE YOUR PASSWORD HERE\",\n",
" instance_id=\"openshift\",\n",
" version=\"4.8\",\n",
" project_id=\"PASTE YOUR PROJECT_ID HERE\",\n",
" params=parameters,\n",
")"
]
},
{
"cell_type": "markdown",
"id": "96ed13d4",
"metadata": {},
"source": [
"Instead of `model_id`, you can also pass the `deployment_id` of the previously tuned model. The entire model tuning workflow is described in [Working with TuneExperiment and PromptTuner](https://ibm.github.io/watsonx-ai-python-sdk/pt_working_with_class_and_prompt_tuner.html)."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "08e66c88",
"metadata": {},
"outputs": [],
"source": [
"chat = ChatWatsonx(\n",
" deployment_id=\"PASTE YOUR DEPLOYMENT_ID HERE\",\n",
" url=\"https://us-south.ml.cloud.ibm.com\",\n",
" project_id=\"PASTE YOUR PROJECT_ID HERE\",\n",
" params=parameters,\n",
")"
]
},
{
"cell_type": "markdown",
"id": "f571001d",
"metadata": {},
"source": [
"## Invocation\n",
"\n",
"To obtain completions, you can call the model directly using a string prompt."
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "beea2b5b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"Je t'aime pour écouter la Rock.\", response_metadata={'token_usage': {'generated_token_count': 12, 'input_token_count': 28}, 'model_name': 'ibm/granite-13b-chat-v2', 'system_fingerprint': '', 'finish_reason': 'stop_sequence'}, id='run-05b305ce-5401-4a10-b557-41a4b15c7f6f-0')"
]
},
"execution_count": 22,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Invocation\n",
"\n",
"messages = [\n",
" (\"system\", \"You are a helpful assistant that translates English to French.\"),\n",
" (\n",
" \"human\",\n",
" \"I love you for listening to Rock.\",\n",
" ),\n",
"]\n",
"\n",
"chat.invoke(messages)"
]
},
{
"cell_type": "code",
"execution_count": 41,
"id": "8ab1a25a",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Sure, I can help you with that! Horses are large, powerful mammals that belong to the family Equidae.', response_metadata={'token_usage': {'generated_token_count': 24, 'input_token_count': 24}, 'model_name': 'ibm/granite-13b-chat-v2', 'system_fingerprint': '', 'finish_reason': 'stop_sequence'}, id='run-391776ff-3b38-4768-91e8-ff64177149e5-0')"
]
},
"execution_count": 41,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Invocation multiple chat\n",
"from langchain_core.messages import (\n",
" HumanMessage,\n",
" SystemMessage,\n",
")\n",
"\n",
"system_message = SystemMessage(\n",
" content=\"You are a helpful assistant which telling short-info about provided topic.\"\n",
")\n",
"human_message = HumanMessage(content=\"horse\")\n",
"\n",
"chat.invoke([system_message, human_message])"
]
},
{
"cell_type": "markdown",
"id": "20e4b568",
"metadata": {},
"source": [
"## Chaining\n",
"Create `ChatPromptTemplate` objects which will be responsible for creating a random question."
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "dd919925",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"system = (\n",
" \"You are a helpful assistant that translates {input_language} to {output_language}.\"\n",
")\n",
"human = \"{input}\"\n",
"prompt = ChatPromptTemplate.from_messages([(\"system\", system), (\"human\", human)])"
]
},
{
"cell_type": "markdown",
"id": "1a013a53",
"metadata": {},
"source": [
"Provide a inputs and run the chain."
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "68160377",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Ich liebe Python.', response_metadata={'token_usage': {'generated_token_count': 5, 'input_token_count': 23}, 'model_name': 'ibm/granite-13b-chat-v2', 'system_fingerprint': '', 'finish_reason': 'stop_sequence'}, id='run-1b1ccf5d-0e33-46f2-a087-e2a136ba1fb7-0')"
]
},
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain = prompt | chat\n",
"chain.invoke(\n",
" {\n",
" \"input_language\": \"English\",\n",
" \"output_language\": \"German\",\n",
" \"input\": \"I love Python\",\n",
" }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "d2c9da33",
"metadata": {},
"source": [
"## Streaming the Model output \n",
"\n",
"You can stream the model output."
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "3f63166a",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The moon is a natural satellite of the Earth, and it has been a source of fascination for humans for centuries."
]
}
],
"source": [
"system_message = SystemMessage(\n",
" content=\"You are a helpful assistant which telling short-info about provided topic.\"\n",
")\n",
"human_message = HumanMessage(content=\"moon\")\n",
"\n",
"for chunk in chat.stream([system_message, human_message]):\n",
" print(chunk.content, end=\"\")"
]
},
{
"cell_type": "markdown",
"id": "5a7a2aa1",
"metadata": {},
"source": [
"## Batch the Model output \n",
"\n",
"You can batch the model output."
]
},
{
"cell_type": "code",
"execution_count": 32,
"id": "9e948729",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[AIMessage(content='Cats are domestic animals that belong to the Felidae family.', response_metadata={'token_usage': {'generated_token_count': 13, 'input_token_count': 24}, 'model_name': 'ibm/granite-13b-chat-v2', 'system_fingerprint': '', 'finish_reason': 'stop_sequence'}, id='run-71a8bd7a-a1aa-497b-9bdd-a4d6fe1d471a-0'),\n",
" AIMessage(content='Dogs are domesticated mammals of the family Canidae, characterized by their adaptability to various environments and social structures.', response_metadata={'token_usage': {'generated_token_count': 24, 'input_token_count': 24}, 'model_name': 'ibm/granite-13b-chat-v2', 'system_fingerprint': '', 'finish_reason': 'stop_sequence'}, id='run-22b7a0cb-e44a-4b68-9921-872f82dcd82b-0')]"
]
},
"execution_count": 32,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"message_1 = [\n",
" SystemMessage(\n",
" content=\"You are a helpful assistant which telling short-info about provided topic.\"\n",
" ),\n",
" HumanMessage(content=\"cat\"),\n",
"]\n",
"message_2 = [\n",
" SystemMessage(\n",
" content=\"You are a helpful assistant which telling short-info about provided topic.\"\n",
" ),\n",
" HumanMessage(content=\"dog\"),\n",
"]\n",
"\n",
"chat.batch([message_1, message_2])"
]
},
{
"cell_type": "markdown",
"id": "c739e1fe",
"metadata": {},
"source": [
"## Tool calling\n",
"\n",
"### ChatWatsonx.bind_tools()\n",
"\n",
"Please note that `ChatWatsonx.bind_tools` is on beta state, so right now we only support `mistralai/mixtral-8x7b-instruct-v01` model.\n",
"\n",
"You should also redefine `max_new_tokens` parameter to get the entire model response. By default `max_new_tokens` is set to 20."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "328fce76",
"metadata": {},
"outputs": [],
"source": [
"from langchain_ibm import ChatWatsonx\n",
"\n",
"parameters = {\"max_new_tokens\": 200}\n",
"\n",
"chat = ChatWatsonx(\n",
" model_id=\"mistralai/mixtral-8x7b-instruct-v01\",\n",
" url=\"https://us-south.ml.cloud.ibm.com\",\n",
" project_id=\"PASTE YOUR PROJECT_ID HERE\",\n",
" params=parameters,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "e1633a73",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"\n",
"\n",
"class GetWeather(BaseModel):\n",
" \"\"\"Get the current weather in a given location\"\"\"\n",
"\n",
" location: str = Field(..., description=\"The city and state, e.g. San Francisco, CA\")\n",
"\n",
"\n",
"llm_with_tools = chat.bind_tools([GetWeather])"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "3bf9b8ab",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='', additional_kwargs={'function_call': {'type': 'function'}, 'tool_calls': [{'type': 'function', 'function': {'name': 'GetWeather', 'arguments': '{\"location\": \"Los Angeles\"}'}, 'id': None}, {'type': 'function', 'function': {'name': 'GetWeather', 'arguments': '{\"location\": \"New York\"}'}, 'id': None}]}, response_metadata={'token_usage': {'generated_token_count': 99, 'input_token_count': 320}, 'model_name': 'mistralai/mixtral-8x7b-instruct-v01', 'system_fingerprint': '', 'finish_reason': 'eos_token'}, id='run-38627104-f2ac-4edb-8390-d5425fb65979-0', tool_calls=[{'name': 'GetWeather', 'args': {'location': 'Los Angeles'}, 'id': None}, {'name': 'GetWeather', 'args': {'location': 'New York'}, 'id': None}])"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"ai_msg = llm_with_tools.invoke(\n",
" \"Which city is hotter today: LA or NY?\",\n",
")\n",
"ai_msg"
]
},
{
"cell_type": "markdown",
"id": "ba03dbf4",
"metadata": {},
"source": [
"### AIMessage.tool_calls\n",
"Notice that the AIMessage has a `tool_calls` attribute. This contains in a standardized ToolCall format that is model-provider agnostic."
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "38f10ba7",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'GetWeather', 'args': {'location': 'Los Angeles'}, 'id': None},\n",
" {'name': 'GetWeather', 'args': {'location': 'New York'}, 'id': None}]"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"ai_msg.tool_calls"
]
},
{
"cell_type": "markdown",
"id": "9ee72a59",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all IBM watsonx.ai features and configurations head to the API reference: https://api.python.langchain.com/en/latest/ibm_api_reference.html"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.1.undefined"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -41,7 +41,7 @@
"\n",
"### Installation\n",
"\n",
"The LangChain LlamaCpp integration lives in the `langchain-community` and `llama-cpp-python` packages:"
"The LangChain OpenAI integration lives in the `langchain-community` and `llama-cpp-python` packages:"
]
},
{

View File

@@ -15,85 +15,85 @@
"source": [
"# OllamaFunctions\n",
"\n",
"This notebook shows how to use an experimental wrapper around Ollama that gives it [tool calling capabilities](https://python.langchain.com/v0.2/docs/concepts/#functiontool-calling).\n",
"This notebook shows how to use an experimental wrapper around Ollama that gives it the same API as OpenAI Functions.\n",
"\n",
"Note that more powerful and capable models will perform better with complex schema and/or multiple functions. The examples below use llama3 and phi3 models.\n",
"For a complete list of supported models and model variants, see the [Ollama model library](https://ollama.ai/library).\n",
"\n",
":::warning\n",
"\n",
"This is an experimental wrapper that attempts to bolt-on tool calling support to models that do not natively support it. Use with caution.\n",
"\n",
":::\n",
"## Overview\n",
"\n",
"### Integration details\n",
"\n",
"| Class | Package | Local | Serializable | JS support | Package downloads | Package latest |\n",
"|:-----------------------------------------------------------------------------------------------------------------------------------:|:-------:|:-----:|:------------:|:----------:|:-----------------:|:--------------:|\n",
"| [OllamaFunctions](https://api.python.langchain.com/en/latest/llms/langchain_experimental.llms.ollama_function.OllamaFunctions.html) | [langchain-experimental](https://api.python.langchain.com/en/latest/openai_api_reference.html) | ✅ | ❌ | ❌ | ![PyPI - Downloads](https://img.shields.io/pypi/dm/langchain-experimental?style=flat-square&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/langchain-experimental?style=flat-square&label=%20) |\n",
"\n",
"### Model features\n",
"\n",
"| [Tool calling](/docs/how_to/tool_calling/) | [Structured output](/docs/how_to/structured_output/) | JSON mode | Image input | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ |\n",
"\n",
"## Setup\n",
"\n",
"To access `OllamaFunctions` you will need to install `langchain-experimental` integration package.\n",
"Follow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance as well as download and serve [supported models](https://ollama.com/library).\n",
"Follow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance.\n",
"\n",
"### Credentials\n",
"## Usage\n",
"\n",
"Credentials support is not present at this time.\n",
"\n",
"### Installation\n",
"\n",
"The `OllamaFunctions` class lives in the `langchain-experimental` package:\n"
"You can initialize OllamaFunctions in a similar way to how you'd initialize a standard ChatOllama instance:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-experimental"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"`OllamaFunctions` takes the same init parameters as `ChatOllama`. \n",
"\n",
"In order to use tool calling, you must also specify `format=\"json\"`."
]
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 1,
"metadata": {
"ExecuteTime": {
"end_time": "2024-06-23T15:20:21.818089Z",
"start_time": "2024-06-23T15:20:21.815759Z"
}
"end_time": "2024-04-28T00:53:25.276543Z",
"start_time": "2024-04-28T00:53:24.881202Z"
},
"scrolled": true
},
"outputs": [],
"source": [
"from langchain_experimental.llms.ollama_functions import OllamaFunctions\n",
"\n",
"llm = OllamaFunctions(model=\"phi3\")"
"model = OllamaFunctions(model=\"llama3\", format=\"json\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Invocation"
"You can then bind functions defined with JSON Schema parameters and a `function_call` parameter to force the model to call the given function:"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"ExecuteTime": {
"end_time": "2024-04-26T04:59:17.270931Z",
"start_time": "2024-04-26T04:59:17.263347Z"
}
},
"outputs": [],
"source": [
"model = model.bind_tools(\n",
" tools=[\n",
" {\n",
" \"name\": \"get_current_weather\",\n",
" \"description\": \"Get the current weather in a given location\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"location\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The city and state, \" \"e.g. San Francisco, CA\",\n",
" },\n",
" \"unit\": {\n",
" \"type\": \"string\",\n",
" \"enum\": [\"celsius\", \"fahrenheit\"],\n",
" },\n",
" },\n",
" \"required\": [\"location\"],\n",
" },\n",
" }\n",
" ],\n",
" function_call={\"name\": \"get_current_weather\"},\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Calling a function with this model then results in JSON output matching the provided schema:"
]
},
{
@@ -101,15 +101,15 @@
"execution_count": 3,
"metadata": {
"ExecuteTime": {
"end_time": "2024-06-23T15:20:46.794689Z",
"start_time": "2024-06-23T15:20:44.982632Z"
"end_time": "2024-04-26T04:59:26.092428Z",
"start_time": "2024-04-26T04:59:17.272627Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"J'adore programmer.\", id='run-94815fcf-ae11-438a-ba3f-00819328b5cd-0')"
"AIMessage(content='', additional_kwargs={'function_call': {'name': 'get_current_weather', 'arguments': '{\"location\": \"Boston, MA\"}'}}, id='run-1791f9fe-95ad-4ca4-bdf7-9f73eab31e6f-0')"
]
},
"execution_count": 3,
@@ -118,55 +118,79 @@
}
],
"source": [
"messages = [\n",
" (\n",
" \"system\",\n",
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
" ),\n",
" (\"human\", \"I love programming.\"),\n",
"]\n",
"ai_msg = llm.invoke(messages)\n",
"ai_msg"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\"J'adore programmer.\""
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"ai_msg.content"
"from langchain_core.messages import HumanMessage\n",
"\n",
"model.invoke(\"what is the weather in Boston?\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Chaining\n",
"## Structured Output\n",
"\n",
"We can [chain](https://python.langchain.com/v0.2/docs/how_to/sequence/) our model with a prompt template like so:"
"One useful thing you can do with function calling using `with_structured_output()` function is extracting properties from a given input in a structured format:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"ExecuteTime": {
"end_time": "2024-04-26T04:59:26.098828Z",
"start_time": "2024-04-26T04:59:26.094021Z"
}
},
"outputs": [],
"source": [
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"\n",
"\n",
"# Schema for structured response\n",
"class Person(BaseModel):\n",
" name: str = Field(description=\"The person's name\", required=True)\n",
" height: float = Field(description=\"The person's height\", required=True)\n",
" hair_color: str = Field(description=\"The person's hair color\")\n",
"\n",
"\n",
"# Prompt template\n",
"prompt = PromptTemplate.from_template(\n",
" \"\"\"Alex is 5 feet tall. \n",
"Claudia is 1 feet taller than Alex and jumps higher than him. \n",
"Claudia is a brunette and Alex is blonde.\n",
"\n",
"Human: {question}\n",
"AI: \"\"\"\n",
")\n",
"\n",
"# Chain\n",
"llm = OllamaFunctions(model=\"phi3\", format=\"json\", temperature=0)\n",
"structured_llm = llm.with_structured_output(Person)\n",
"chain = prompt | structured_llm"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Extracting data about Alex"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"metadata": {
"ExecuteTime": {
"end_time": "2024-04-26T04:59:30.164955Z",
"start_time": "2024-04-26T04:59:26.099790Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Programmieren ist sehr verrückt! Es freut mich, dass Sie auf Programmierung so positiv eingestellt sind.', id='run-ee99be5e-4d48-4ab6-b602-35415f0bdbde-0')"
"Person(name='Alex', height=5.0, hair_color='blonde')"
]
},
"execution_count": 5,
@@ -175,123 +199,41 @@
}
],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\n",
" \"system\",\n",
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
" ),\n",
" (\"human\", \"{input}\"),\n",
" ]\n",
")\n",
"\n",
"chain = prompt | llm\n",
"chain.invoke(\n",
" {\n",
" \"input_language\": \"English\",\n",
" \"output_language\": \"German\",\n",
" \"input\": \"I love programming.\",\n",
" }\n",
")"
"alex = chain.invoke(\"Describe Alex\")\n",
"alex"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Tool Calling\n",
"\n",
"### OllamaFunctions.bind_tools()\n",
"\n",
"With `OllamaFunctions.bind_tools`, we can easily pass in Pydantic classes, dict schemas, LangChain tools, or even functions as tools to the model. Under the hood these are converted to a tool definition schemas, which looks like:"
"### Extracting data about Claudia"
]
},
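A minimal sketch of the tool definition schema referred to above, illustrative only and not part of this notebook cell (the shape mirrors the `get_current_weather` example that appears earlier in this diff):

```python
# Illustrative only: the kind of schema that bind_tools produces under the
# hood -- a name, a description, and JSON Schema "parameters".
weather_tool = {
    "name": "get_current_weather",
    "description": "Get the current weather in a given location",
    "parameters": {
        "type": "object",
        "properties": {
            "location": {
                "type": "string",
                "description": "The city and state, e.g. San Francisco, CA",
            },
            "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
        },
        "required": ["location"],
    },
}
```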
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"\n",
"\n",
"class GetWeather(BaseModel):\n",
" \"\"\"Get the current weather in a given location\"\"\"\n",
"\n",
" location: str = Field(..., description=\"The city and state, e.g. San Francisco, CA\")\n",
"\n",
"\n",
"llm_with_tools = llm.bind_tools([GetWeather])"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"execution_count": 6,
"metadata": {
"ExecuteTime": {
"end_time": "2024-04-26T04:59:31.509846Z",
"start_time": "2024-04-26T04:59:30.165662Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='', id='run-b9769435-ec6a-4cb8-8545-5a5035fc19bd-0', tool_calls=[{'name': 'GetWeather', 'args': {'location': 'San Francisco, CA'}, 'id': 'call_064c4e1cb27e4adb9e4e7ed60362ecc9'}])"
"Person(name='Claudia', height=6.0, hair_color='brunette')"
]
},
"execution_count": 8,
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"ai_msg = llm_with_tools.invoke(\n",
" \"what is the weather like in San Francisco\",\n",
")\n",
"ai_msg"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### AIMessage.tool_calls\n",
"\n",
"Notice that the AIMessage has a `tool_calls` attribute. This contains in a standardized `ToolCall` format that is model-provider agnostic."
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'GetWeather',\n",
" 'args': {'location': 'San Francisco, CA'},\n",
" 'id': 'call_064c4e1cb27e4adb9e4e7ed60362ecc9'}]"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"ai_msg.tool_calls"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": "For more on binding tools and tool call outputs, head to the [tool calling](docs/how_to/function_calling) docs."
},
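As a hedged illustration of consuming those tool call outputs locally (not part of the notebook; `get_weather` is a hypothetical stand-in and `ai_msg` is the message produced in the cells above):

```python
# Sketch: dispatch the standardized tool calls on ai_msg.tool_calls to local
# Python functions and print their results.
def get_weather(location: str) -> str:
    # Hypothetical helper; a real implementation would call a weather API.
    return f"It is sunny in {location}."


dispatch = {"GetWeather": get_weather}

for call in ai_msg.tool_calls:
    result = dispatch[call["name"]](**call["args"])
    print(call["name"], call["args"], "->", result)
```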
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all ToolCallingLLM features and configurations head to the API reference: https://api.python.langchain.com/en/latest/llms/langchain_experimental.llms.ollama_functions.OllamaFunctions.html\n"
"claudia = chain.invoke(\"Describe Claudia\")\n",
"claudia"
]
}
],
@@ -311,7 +253,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
"version": "3.9.1"
}
},
"nbformat": 4,

View File

@@ -426,7 +426,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
"version": "3.9.1"
}
},
"nbformat": 4,

View File

@@ -45,7 +45,7 @@
"The code provided assumes that your PPLX_API_KEY is set in your environment variables. If you would like to manually specify your API key and also choose a different model, you can use the following code:\n",
"\n",
"```python\n",
"chat = ChatPerplexity(temperature=0, pplx_api_key=\"YOUR_API_KEY\", model=\"llama-3-sonar-small-32k-online\")\n",
"chat = ChatPerplexity(temperature=0, pplx_api_key=\"YOUR_API_KEY\", model=\"pplx-70b-online\")\n",
"```\n",
"\n",
"You can check a list of available models [here](https://docs.perplexity.ai/docs/model-cards). For reproducibility, we can set the API key dynamically by taking it as an input in this notebook."
@@ -78,7 +78,7 @@
},
"outputs": [],
"source": [
"chat = ChatPerplexity(temperature=0, model=\"llama-3-sonar-small-32k-online\")"
"chat = ChatPerplexity(temperature=0, model=\"pplx-70b-online\")"
]
},
{
@@ -146,7 +146,7 @@
}
],
"source": [
"chat = ChatPerplexity(temperature=0, model=\"llama-3-sonar-small-32k-online\")\n",
"chat = ChatPerplexity(temperature=0, model=\"pplx-70b-online\")\n",
"prompt = ChatPromptTemplate.from_messages([(\"human\", \"Tell me a joke about {topic}\")])\n",
"chain = prompt | chat\n",
"response = chain.invoke({\"topic\": \"cats\"})\n",
@@ -195,7 +195,7 @@
}
],
"source": [
"chat = ChatPerplexity(temperature=0.7, model=\"llama-3-sonar-small-32k-online\")\n",
"chat = ChatPerplexity(temperature=0.7, model=\"pplx-70b-online\")\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [(\"human\", \"Give me a list of famous tourist attractions in Pakistan\")]\n",
")\n",

View File

@@ -1,19 +1,5 @@
{
"cells": [
{
"cell_type": "raw",
"id": "bd931196",
"metadata": {
"vscode": {
"languageId": "raw"
}
},
"source": [
"---\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "1f3a5ebf",

View File

@@ -1,15 +1,5 @@
{
"cells": [
{
"cell_type": "raw",
"id": "344fc5a3",
"metadata": {},
"source": [
"---\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "1f3a5ebf",

View File

@@ -1,15 +1,5 @@
{
"cells": [
{
"cell_type": "raw",
"id": "a792e839",
"metadata": {},
"source": [
"---\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "1f3a5ebf",

View File

@@ -1,15 +1,5 @@
{
"cells": [
{
"cell_type": "raw",
"id": "61c2629c",
"metadata": {},
"source": [
"---\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "1f3a5ebf",

View File

@@ -1,15 +1,5 @@
{
"cells": [
{
"cell_type": "raw",
"id": "e329385c",
"metadata": {},
"source": [
"---\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "1f3a5ebf",

View File

@@ -1,15 +1,5 @@
{
"cells": [
{
"cell_type": "raw",
"id": "3169f380",
"metadata": {},
"source": [
"---\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "1f3a5ebf",

View File

@@ -1,15 +1,5 @@
{
"cells": [
{
"cell_type": "raw",
"id": "87552e5a",
"metadata": {},
"source": [
"---\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "1f3a5ebf",

View File

@@ -1,15 +1,5 @@
{
"cells": [
{
"cell_type": "raw",
"id": "9a10cdcc",
"metadata": {},
"source": [
"---\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "1f3a5ebf",

View File

@@ -1,15 +1,5 @@
{
"cells": [
{
"cell_type": "raw",
"id": "41200199",
"metadata": {},
"source": [
"---\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "1f3a5ebf",

File diff suppressed because one or more lines are too long

View File

@@ -13,7 +13,7 @@
"\n",
"Headless mode means that the browser is running without a graphical user interface.\n",
"\n",
"In the below example we'll use the `AsyncChromiumLoader` to loads the page, and then the [`Html2TextTransformer`](/docs/integrations/document_transformers/html2text/) to strip out the HTML tags and other semantic information."
"`AsyncChromiumLoader` loads the page, and then we use `Html2TextTransformer` to transform to text."
]
},
{
@@ -23,22 +23,48 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet playwright beautifulsoup4 html2text\n",
"%pip install --upgrade --quiet playwright beautifulsoup4\n",
"!playwright install"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "dd2cdea7",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'<!DOCTYPE html><html lang=\"en\"><head><script src=\"https://s0.2mdn.net/instream/video/client.js\" asyn'"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_community.document_loaders import AsyncChromiumLoader\n",
"\n",
"urls = [\"https://www.wsj.com\"]\n",
"loader = AsyncChromiumLoader(urls, user_agent=\"MyAppUserAgent\")\n",
"docs = loader.load()\n",
"docs[0].page_content[0:100]"
]
},
{
"cell_type": "markdown",
"id": "00487c0f",
"id": "c64e7df9",
"metadata": {},
"source": [
"**Note:** If you are using Jupyter notebooks, you might also need to install and apply `nest_asyncio` before loading the documents like this:"
"If you are using Jupyter notebooks, you might need to apply `nest_asyncio` before loading the documents."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d374eef4",
"id": "5f2fe3c0",
"metadata": {},
"outputs": [],
"source": [
@@ -48,40 +74,6 @@
"nest_asyncio.apply()"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "dd2cdea7",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'<!DOCTYPE html><html lang=\"en\" dir=\"ltr\" class=\"docs-wrapper docs-doc-page docs-version-2.0 plugin-d'"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_community.document_loaders import AsyncChromiumLoader\n",
"\n",
"urls = [\"https://docs.smith.langchain.com/\"]\n",
"loader = AsyncChromiumLoader(urls, user_agent=\"MyAppUserAgent\")\n",
"docs = loader.load()\n",
"docs[0].page_content[0:100]"
]
},
{
"cell_type": "markdown",
"id": "7eb5e6aa",
"metadata": {},
"source": [
"Now let's transform the documents into a more readable syntax using the transformer:"
]
},
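The code cell that actually performs the transformation falls outside this hunk; a minimal sketch of the usual `Html2TextTransformer` usage, assuming `docs` from the loader above, would be:

```python
from langchain_community.document_transformers import Html2TextTransformer

# Convert the raw HTML documents into plain, readable text.
html2text = Html2TextTransformer()
docs_transformed = html2text.transform_documents(docs)
docs_transformed[0].page_content[0:500]
```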
{
"cell_type": "code",
"execution_count": 6,
@@ -91,7 +83,7 @@
{
"data": {
"text/plain": [
"'Skip to main content\\n\\nGo to API Docs\\n\\nSearch`⌘``K`\\n\\nGo to App\\n\\n * Quick start\\n * Tutorials\\n\\n * How-to guides\\n\\n * Concepts\\n\\n * Reference\\n\\n * Pricing\\n * Self-hosting\\n\\n * LangGraph Cloud\\n\\n * * Quick start\\n\\nOn this page\\n\\n# Get started with LangSmith\\n\\n**LangSmith** is a platform for building production-grade LLM applications. It\\nallows you to closely monitor and evaluate your application, so you can ship\\nquickly and with confidence. Use of LangChain is not necessary - LangSmith\\nworks on it'"
"\"Skip to Main ContentSkip to SearchSkip to... Select * Top News * What's News *\\nFeatured Stories * Retirement * Life & Arts * Hip-Hop * Sports * Video *\\nEconomy * Real Estate * Sports * CMO * CIO * CFO * Risk & Compliance *\\nLogistics Report * Sustainable Business * Heard on the Street * Barrons *\\nMarketWatch * Mansion Global * Penta * Opinion * Journal Reports * Sponsored\\nOffers Explore Our Brands * WSJ * * * * * Barron's * * * * * MarketWatch * * *\\n* * IBD # The Wall Street Journal SubscribeSig\""
]
},
"execution_count": 6,
@@ -124,7 +116,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.9.16"
}
},
"nbformat": 4,

File diff suppressed because one or more lines are too long

View File

@@ -7,9 +7,7 @@
"source": [
"# Email\n",
"\n",
"This notebook shows how to load email (`.eml`) or `Microsoft Outlook` (`.msg`) files.\n",
"\n",
"Please see [this guide](/docs/integrations/providers/unstructured/) for more instructions on setting up Unstructured locally, including setting up required system dependencies."
"This notebook shows how to load email (`.eml`) or `Microsoft Outlook` (`.msg`) files."
]
},
{
@@ -29,13 +27,49 @@
},
"outputs": [],
"source": [
"%pip install --upgrade --quiet unstructured"
"%pip install --upgrade --quiet unstructured"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "40cd9806",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain_community.document_loaders import UnstructuredEmailLoader"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "2d20b852",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"loader = UnstructuredEmailLoader(\"example_data/fake-email.eml\")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "2d20b852",
"id": "579fa702",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"data = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "90c1d899",
"metadata": {
"tags": []
},
@@ -43,21 +77,15 @@
{
"data": {
"text/plain": [
"[Document(page_content='This is a test email to use for unit tests.\\n\\nImportant points:\\n\\nRoses are red\\n\\nViolets are blue', metadata={'source': './example_data/fake-email.eml'})]"
"[Document(page_content='This is a test email to use for unit tests.\\n\\nImportant points:\\n\\nRoses are red\\n\\nViolets are blue', metadata={'source': 'example_data/fake-email.eml'})]"
]
},
"execution_count": 3,
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_community.document_loaders import UnstructuredEmailLoader\n",
"\n",
"loader = UnstructuredEmailLoader(\"./example_data/fake-email.eml\")\n",
"\n",
"data = loader.load()\n",
"\n",
"data"
]
},
@@ -73,26 +101,42 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 5,
"id": "b9592eaf",
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredEmailLoader(\"example_data/fake-email.eml\", mode=\"elements\")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "0b16d03f",
"metadata": {},
"outputs": [],
"source": [
"data = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "d7bdc5e5",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Document(page_content='This is a test email to use for unit tests.', metadata={'source': 'example_data/fake-email.eml', 'file_directory': 'example_data', 'filename': 'fake-email.eml', 'last_modified': '2022-12-16T17:04:16-05:00', 'sent_from': ['Matthew Robinson <mrobinson@unstructured.io>'], 'sent_to': ['Matthew Robinson <mrobinson@unstructured.io>'], 'subject': 'Test Email', 'languages': ['eng'], 'filetype': 'message/rfc822', 'category': 'NarrativeText'})"
"Document(page_content='This is a test email to use for unit tests.', metadata={'source': 'example_data/fake-email.eml', 'filename': 'fake-email.eml', 'file_directory': 'example_data', 'date': '2022-12-16T17:04:16-05:00', 'filetype': 'message/rfc822', 'sent_from': ['Matthew Robinson <mrobinson@unstructured.io>'], 'sent_to': ['Matthew Robinson <mrobinson@unstructured.io>'], 'subject': 'Test Email', 'category': 'NarrativeText'})"
]
},
"execution_count": 4,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"loader = UnstructuredEmailLoader(\"example_data/fake-email.eml\", mode=\"elements\")\n",
"\n",
"data = loader.load()\n",
"\n",
"data[0]"
]
},
@@ -108,30 +152,46 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 8,
"id": "6539f166",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Document(page_content='This is a test email to use for unit tests.', metadata={'source': 'example_data/fake-email.eml', 'file_directory': 'example_data', 'filename': 'fake-email.eml', 'last_modified': '2022-12-16T17:04:16-05:00', 'sent_from': ['Matthew Robinson <mrobinson@unstructured.io>'], 'sent_to': ['Matthew Robinson <mrobinson@unstructured.io>'], 'subject': 'Test Email', 'languages': ['eng'], 'filetype': 'message/rfc822', 'category': 'NarrativeText'})"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"outputs": [],
"source": [
"loader = UnstructuredEmailLoader(\n",
" \"example_data/fake-email.eml\",\n",
" mode=\"elements\",\n",
" process_attachments=True,\n",
")\n",
"\n",
"data = loader.load()\n",
"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "aebead38",
"metadata": {},
"outputs": [],
"source": [
"data = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "ddeb60f4",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Document(page_content='This is a test email to use for unit tests.', metadata={'source': 'example_data/fake-email.eml', 'filename': 'fake-email.eml', 'file_directory': 'example_data', 'date': '2022-12-16T17:04:16-05:00', 'filetype': 'message/rfc822', 'sent_from': ['Matthew Robinson <mrobinson@unstructured.io>'], 'sent_to': ['Matthew Robinson <mrobinson@unstructured.io>'], 'subject': 'Test Email', 'category': 'NarrativeText'})"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"data[0]"
]
},
@@ -150,33 +210,57 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet extract_msg"
"%pip install --upgrade --quiet extract_msg"
]
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 8,
"id": "1e7a8444",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders import OutlookMessageLoader"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "77a055e6",
"metadata": {},
"outputs": [],
"source": [
"loader = OutlookMessageLoader(\"example_data/fake-email.msg\")"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "789882de",
"metadata": {},
"outputs": [],
"source": [
"data = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "46aa0632",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Document(page_content='This is a test email to experiment with the MS Outlook MSG Extractor\\r\\n\\r\\n\\r\\n-- \\r\\n\\r\\n\\r\\nKind regards\\r\\n\\r\\n\\r\\n\\r\\n\\r\\nBrian Zhou\\r\\n\\r\\n', metadata={'source': 'example_data/fake-email.msg', 'subject': 'Test for TIF files', 'sender': 'Brian Zhou <brizhou@gmail.com>', 'date': datetime.datetime(2013, 11, 18, 0, 26, 24, tzinfo=zoneinfo.ZoneInfo(key='America/Los_Angeles'))})"
"Document(page_content='This is a test email to experiment with the MS Outlook MSG Extractor\\r\\n\\r\\n\\r\\n-- \\r\\n\\r\\n\\r\\nKind regards\\r\\n\\r\\n\\r\\n\\r\\n\\r\\nBrian Zhou\\r\\n\\r\\n', metadata={'subject': 'Test for TIF files', 'sender': 'Brian Zhou <brizhou@gmail.com>', 'date': 'Mon, 18 Nov 2013 16:26:24 +0800'})"
]
},
"execution_count": 7,
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_community.document_loaders import OutlookMessageLoader\n",
"\n",
"loader = OutlookMessageLoader(\"example_data/fake-email.msg\")\n",
"\n",
"data = loader.load()\n",
"\n",
"data[0]"
]
},
@@ -205,7 +289,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.8.13"
}
},
"nbformat": 4,

File diff suppressed because one or more lines are too long

Binary file not shown.

Before

Width:  |  Height:  |  Size: 408 KiB

View File

@@ -1,723 +0,0 @@
Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans.
Last year COVID-19 kept us apart. This year we are finally together again.
Tonight, we meet as Democrats Republicans and Independents. But most importantly as Americans.
With a duty to one another to the American people to the Constitution.
And with an unwavering resolve that freedom will always triumph over tyranny.
Six days ago, Russias Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated.
He thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined.
He met the Ukrainian people.
From President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world.
Groups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland.
In this struggle as President Zelenskyy said in his speech to the European Parliament “Light will win over darkness.” The Ukrainian Ambassador to the United States is here tonight.
Let each of us here tonight in this Chamber send an unmistakable signal to Ukraine and to the world.
Please rise if you are able and show that, Yes, we the United States of America stand with the Ukrainian people.
Throughout our history weve learned this lesson when dictators do not pay a price for their aggression they cause more chaos.
They keep moving.
And the costs and the threats to America and the world keep rising.
Thats why the NATO Alliance was created to secure peace and stability in Europe after World War 2.
The United States is a member along with 29 other nations.
It matters. American diplomacy matters. American resolve matters.
Putins latest attack on Ukraine was premeditated and unprovoked.
He rejected repeated efforts at diplomacy.
He thought the West and NATO wouldnt respond. And he thought he could divide us at home. Putin was wrong. We were ready. Here is what we did.
We prepared extensively and carefully.
We spent months building a coalition of other freedom-loving nations from Europe and the Americas to Asia and Africa to confront Putin.
I spent countless hours unifying our European allies. We shared with the world in advance what we knew Putin was planning and precisely how he would try to falsely justify his aggression.
We countered Russias lies with truth.
And now that he has acted the free world is holding him accountable.
Along with twenty-seven members of the European Union including France, Germany, Italy, as well as countries like the United Kingdom, Canada, Japan, Korea, Australia, New Zealand, and many others, even Switzerland.
We are inflicting pain on Russia and supporting the people of Ukraine. Putin is now isolated from the world more than ever.
Together with our allies we are right now enforcing powerful economic sanctions.
We are cutting off Russias largest banks from the international financial system.
Preventing Russias central bank from defending the Russian Ruble making Putins $630 Billion “war fund” worthless.
We are choking off Russias access to technology that will sap its economic strength and weaken its military for years to come.
Tonight I say to the Russian oligarchs and corrupt leaders who have bilked billions of dollars off this violent regime no more.
The U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs.
We are joining with our European allies to find and seize your yachts your luxury apartments your private jets. We are coming for your ill-begotten gains.
And tonight I am announcing that we will join our allies in closing off American air space to all Russian flights further isolating Russia and adding an additional squeeze on their economy. The Ruble has lost 30% of its value.
The Russian stock market has lost 40% of its value and trading remains suspended. Russias economy is reeling and Putin alone is to blame.
Together with our allies we are providing support to the Ukrainians in their fight for freedom. Military assistance. Economic assistance. Humanitarian assistance.
We are giving more than $1 Billion in direct assistance to Ukraine.
And we will continue to aid the Ukrainian people as they defend their country and to help ease their suffering.
Let me be clear, our forces are not engaged and will not engage in conflict with Russian forces in Ukraine.
Our forces are not going to Europe to fight in Ukraine, but to defend our NATO Allies in the event that Putin decides to keep moving west.
For that purpose weve mobilized American ground forces, air squadrons, and ship deployments to protect NATO countries including Poland, Romania, Latvia, Lithuania, and Estonia.
As I have made crystal clear the United States and our Allies will defend every inch of territory of NATO countries with the full force of our collective power.
And we remain clear-eyed. The Ukrainians are fighting back with pure courage. But the next few days weeks, months, will be hard on them.
Putin has unleashed violence and chaos. But while he may make gains on the battlefield he will pay a continuing high price over the long run.
And a proud Ukrainian people, who have known 30 years of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards.
To all Americans, I will be honest with you, as Ive always promised. A Russian dictator, invading a foreign country, has costs around the world.
And Im taking robust action to make sure the pain of our sanctions is targeted at Russias economy. And I will use every tool at our disposal to protect American businesses and consumers.
Tonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world.
America will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies.
These steps will help blunt gas prices here at home. And I know the news about whats happening can seem alarming.
But I want you to know that we are going to be okay.
When the history of this era is written Putins war on Ukraine will have left Russia weaker and the rest of the world stronger.
While it shouldnt have taken something so terrible for people around the world to see whats at stake now everyone sees it clearly.
We see the unity among leaders of nations and a more unified Europe a more unified West. And we see unity among the people who are gathering in cities in large crowds around the world even in Russia to demonstrate their support for Ukraine.
In the battle between democracy and autocracy, democracies are rising to the moment, and the world is clearly choosing the side of peace and security.
This is a real test. Its going to take time. So let us continue to draw inspiration from the iron will of the Ukrainian people.
To our fellow Ukrainian Americans who forge a deep bond that connects our two nations we stand with you.
Putin may circle Kyiv with tanks, but he will never gain the hearts and souls of the Ukrainian people.
He will never extinguish their love of freedom. He will never weaken the resolve of the free world.
We meet tonight in an America that has lived through two of the hardest years this nation has ever faced.
The pandemic has been punishing.
And so many families are living paycheck to paycheck, struggling to keep up with the rising cost of food, gas, housing, and so much more.
I understand.
I remember when my Dad had to leave our home in Scranton, Pennsylvania to find work. I grew up in a family where if the price of food went up, you felt it.
Thats why one of the first things I did as President was fight to pass the American Rescue Plan.
Because people were hurting. We needed to act, and we did.
Few pieces of legislation have done more in a critical moment in our history to lift us out of crisis.
It fueled our efforts to vaccinate the nation and combat COVID-19. It delivered immediate economic relief for tens of millions of Americans.
Helped put food on their table, keep a roof over their heads, and cut the cost of health insurance.
And as my Dad used to say, it gave people a little breathing room.
And unlike the $2 Trillion tax cut passed in the previous administration that benefitted the top 1% of Americans, the American Rescue Plan helped working people—and left no one behind.
And it worked. It created jobs. Lots of jobs.
In fact—our economy created over 6.5 Million new jobs just last year, more jobs created in one year
than ever before in the history of America.
Our economy grew at a rate of 5.7% last year, the strongest growth in nearly 40 years, the first step in bringing fundamental change to an economy that hasnt worked for the working people of this nation for too long.
For the past 40 years we were told that if we gave tax breaks to those at the very top, the benefits would trickle down to everyone else.
But that trickle-down theory led to weaker economic growth, lower wages, bigger deficits, and the widest gap between those at the top and everyone else in nearly a century.
Vice President Harris and I ran for office with a new economic vision for America.
Invest in America. Educate Americans. Grow the workforce. Build the economy from the bottom up
and the middle out, not from the top down.
Because we know that when the middle class grows, the poor have a ladder up and the wealthy do very well.
America used to have the best roads, bridges, and airports on Earth.
Now our infrastructure is ranked 13th in the world.
We wont be able to compete for the jobs of the 21st Century if we dont fix that.
Thats why it was so important to pass the Bipartisan Infrastructure Law—the most sweeping investment to rebuild America in history.
This was a bipartisan effort, and I want to thank the members of both parties who worked to make it happen.
Were done talking about infrastructure weeks.
Were going to have an infrastructure decade.
It is going to transform America and put us on a path to win the economic competition of the 21st Century that we face with the rest of the world—particularly with China.
As Ive told Xi Jinping, it is never a good bet to bet against the American people.
Well create good jobs for millions of Americans, modernizing roads, airports, ports, and waterways all across America.
And well do it all to withstand the devastating effects of the climate crisis and promote environmental justice.
Well build a national network of 500,000 electric vehicle charging stations, begin to replace poisonous lead pipes—so every child—and every American—has clean water to drink at home and at school, provide affordable high-speed internet for every American—urban, suburban, rural, and tribal communities.
4,000 projects have already been announced.
And tonight, Im announcing that this year we will start fixing over 65,000 miles of highway and 1,500 bridges in disrepair.
When we use taxpayer dollars to rebuild America we are going to Buy American: buy American products to support American jobs.
The federal government spends about $600 Billion a year to keep the country safe and secure.
Theres been a law on the books for almost a century
to make sure taxpayers dollars support American jobs and businesses.
Every Administration says theyll do it, but we are actually doing it.
We will buy American to make sure everything from the deck of an aircraft carrier to the steel on highway guardrails are made in America.
But to compete for the best jobs of the future, we also need to level the playing field with China and other competitors.
Thats why it is so important to pass the Bipartisan Innovation Act sitting in Congress that will make record investments in emerging technologies and American manufacturing.
Let me give you one example of why its so important to pass it.
If you travel 20 miles east of Columbus, Ohio, youll find 1,000 empty acres of land.
It wont look like much, but if you stop and look closely, youll see a “Field of dreams,” the ground on which Americas future will be built.
This is where Intel, the American company that helped build Silicon Valley, is going to build its $20 billion semiconductor “mega site”.
Up to eight state-of-the-art factories in one place. 10,000 new good-paying jobs.
Some of the most sophisticated manufacturing in the world to make computer chips the size of a fingertip that power the world and our everyday lives.
Smartphones. The Internet. Technology we have yet to invent.
But thats just the beginning.
Intels CEO, Pat Gelsinger, who is here tonight, told me they are ready to increase their investment from
$20 billion to $100 billion.
That would be one of the biggest investments in manufacturing in American history.
And all theyre waiting for is for you to pass this bill.
So lets not wait any longer. Send it to my desk. Ill sign it.
And we will really take off.
And Intel is not alone.
Theres something happening in America.
Just look around and youll see an amazing story.
The rebirth of the pride that comes from stamping products “Made In America.” The revitalization of American manufacturing.
Companies are choosing to build new factories here, when just a few years ago, they would have built them overseas.
Thats what is happening. Ford is investing $11 billion to build electric vehicles, creating 11,000 jobs across the country.
GM is making the largest investment in its history—$7 billion to build electric vehicles, creating 4,000 jobs in Michigan.
All told, we created 369,000 new manufacturing jobs in America just last year.
Powered by people Ive met like JoJo Burgess, from generations of union steelworkers from Pittsburgh, whos here with us tonight.
As Ohio Senator Sherrod Brown says, “Its time to bury the label “Rust Belt.”
Its time.
But with all the bright spots in our economy, record job growth and higher wages, too many families are struggling to keep up with the bills.
Inflation is robbing them of the gains they might otherwise feel.
I get it. Thats why my top priority is getting prices under control.
Look, our economy roared back faster than most predicted, but the pandemic meant that businesses had a hard time hiring enough workers to keep up production in their factories.
The pandemic also disrupted global supply chains.
When factories close, it takes longer to make goods and get them from the warehouse to the store, and prices go up.
Look at cars.
Last year, there werent enough semiconductors to make all the cars that people wanted to buy.
And guess what, prices of automobiles went up.
So—we have a choice.
One way to fight inflation is to drive down wages and make Americans poorer.
I have a better plan to fight inflation.
Lower your costs, not your wages.
Make more cars and semiconductors in America.
More infrastructure and innovation in America.
More goods moving faster and cheaper in America.
More jobs where you can earn a good living in America.
And instead of relying on foreign supply chains, lets make it in America.
Economists call it “increasing the productive capacity of our economy.”
I call it building a better America.
My plan to fight inflation will lower your costs and lower the deficit.
17 Nobel laureates in economics say my plan will ease long-term inflationary pressures. Top business leaders and most Americans support my plan. And here's the plan:
First, cut the cost of prescription drugs. Just look at insulin. One in ten Americans has diabetes. In Virginia, I met a 13-year-old boy named Joshua Davis.
He and his Dad both have Type 1 diabetes, which means they need insulin every day. Insulin costs about $10 a vial to make.
But drug companies charge families like Joshua and his Dad up to 30 times more. I spoke with Joshua's mom.
Imagine what it's like to look at your child who needs insulin and have no idea how you're going to pay for it.
What it does to your dignity, your ability to look your child in the eye, to be the parent you expect to be.
Joshua is here with us tonight. Yesterday was his birthday. Happy birthday, buddy.
For Joshua, and for the 200,000 other young people with Type 1 diabetes, let's cap the cost of insulin at $35 a month so everyone can afford it.
Drug companies will still do very well. And while we're at it let Medicare negotiate lower prices for prescription drugs, like the VA already does.
Look, the American Rescue Plan is helping millions of families on Affordable Care Act plans save $2,400 a year on their health care premiums. Let's close the coverage gap and make those savings permanent.
Second, cut energy costs for families an average of $500 a year by combatting climate change.
Let's provide investments and tax credits to weatherize your homes and businesses to be energy efficient and you get a tax credit; double America's clean energy production in solar, wind, and so much more; lower the price of electric vehicles, saving you another $80 a month because you'll never have to pay at the gas pump again.
Third, cut the cost of child care. Many families pay up to $14,000 a year for child care per child.
Middle-class and working families shouldn't have to pay more than 7% of their income for care of young children.
My plan will cut the cost in half for most families and help parents, including millions of women, who left the workforce during the pandemic because they couldn't afford child care, to be able to get back to work.
My plan doesn't stop there. It also includes home and long-term care. More affordable housing. And Pre-K for every 3- and 4-year-old.
All of these will lower costs.
And under my plan, nobody earning less than $400,000 a year will pay an additional penny in new taxes. Nobody.
The one thing all Americans agree on is that the tax system is not fair. We have to fix it.
I'm not looking to punish anyone. But let's make sure corporations and the wealthiest Americans start paying their fair share.
Just last year, 55 Fortune 500 corporations earned $40 billion in profits and paid zero dollars in federal income tax.
That's simply not fair. That's why I've proposed a 15% minimum tax rate for corporations.
We got more than 130 countries to agree on a global minimum tax rate so companies can't get out of paying their taxes at home by shipping jobs and factories overseas.
That's why I've proposed closing loopholes so the very wealthy don't pay a lower tax rate than a teacher or a firefighter.
So that's my plan. It will grow the economy and lower costs for families.
So what are we waiting for? Let's get this done. And while you're at it, confirm my nominees to the Federal Reserve, which plays a critical role in fighting inflation.
My plan will not only lower costs to give families a fair shot, it will lower the deficit.
The previous Administration not only ballooned the deficit with tax cuts for the very wealthy and corporations, it undermined the watchdogs whose job was to keep pandemic relief funds from being wasted.
But in my administration, the watchdogs have been welcomed back.
We're going after the criminals who stole billions in relief money meant for small businesses and millions of Americans.
And tonight, I'm announcing that the Justice Department will name a chief prosecutor for pandemic fraud.
By the end of this year, the deficit will be down to less than half what it was before I took office.
The only president ever to cut the deficit by more than one trillion dollars in a single year.
Lowering your costs also means demanding more competition.
I'm a capitalist, but capitalism without competition isn't capitalism.
It's exploitation—and it drives up prices.
When corporations don't have to compete, their profits go up, your prices go up, and small businesses and family farmers and ranchers go under.
We see it happening with ocean carriers moving goods in and out of America.
During the pandemic, these foreign-owned companies raised prices by as much as 1,000% and made record profits.
Tonight, I'm announcing a crackdown on these companies overcharging American businesses and consumers.
And as Wall Street firms take over more nursing homes, quality in those homes has gone down and costs have gone up.
That ends on my watch.
Medicare is going to set higher standards for nursing homes and make sure your loved ones get the care they deserve and expect.
We'll also cut costs and keep the economy going strong by giving workers a fair shot, provide more training and apprenticeships, hire them based on their skills not degrees.
Let's pass the Paycheck Fairness Act and paid leave.
Raise the minimum wage to $15 an hour and extend the Child Tax Credit, so no one has to raise a family in poverty.
Let's increase Pell Grants and increase our historic support of HBCUs, and invest in what Jill—our First Lady who teaches full-time—calls America's best-kept secret: community colleges.
And let's pass the PRO Act when a majority of workers want to form a union—they shouldn't be stopped.
When we invest in our workers, when we build the economy from the bottom up and the middle out together, we can do something we haven't done in a long time: build a better America.
For more than two years, COVID-19 has impacted every decision in our lives and the life of the nation.
And I know you're tired, frustrated, and exhausted.
But I also know this.
Because of the progress we've made, because of your resilience and the tools we have, tonight I can say
we are moving forward safely, back to more normal routines.
Weve reached a new moment in the fight against COVID-19, with severe cases down to a level not seen since last July.
Just a few days ago, the Centers for Disease Control and Prevention—the CDC—issued new mask guidelines.
Under these new guidelines, most Americans in most of the country can now be mask free.
And based on the projections, more of the country will reach that point across the next couple of weeks.
Thanks to the progress we have made this past year, COVID-19 need no longer control our lives.
I know some are talking about “living with COVID-19”. Tonight I say that we will never just accept living with COVID-19.
We will continue to combat the virus as we do other diseases. And because this is a virus that mutates and spreads, we will stay on guard.
Here are four common sense steps as we move forward safely.
First, stay protected with vaccines and treatments. We know how incredibly effective vaccines are. If you're vaccinated and boosted you have the highest degree of protection.
We will never give up on vaccinating more Americans. Now, I know parents with kids under 5 are eager to see a vaccine authorized for their children.
The scientists are working hard to get that done and we'll be ready with plenty of vaccines when they do.
We're also ready with anti-viral treatments. If you get COVID-19, the Pfizer pill reduces your chances of ending up in the hospital by 90%.
We've ordered more of these pills than anyone in the world. And Pfizer is working overtime to get us 1 Million pills this month and more than double that next month.
And we're launching the "Test to Treat" initiative so people can get tested at a pharmacy, and if they're positive, receive antiviral pills on the spot at no cost.
If you're immunocompromised or have some other vulnerability, we have treatments and free high-quality masks.
Were leaving no one behind or ignoring anyones needs as we move forward.
And on testing, we have made hundreds of millions of tests available for you to order for free.
Even if you already ordered free tests tonight, I am announcing that you can order more from covidtests.gov starting next week.
Second, we must prepare for new variants. Over the past year, we've gotten much better at detecting new variants.
If necessary, we'll be able to deploy new vaccines within 100 days instead of many more months or years.
And, if Congress provides the funds we need, we'll have new stockpiles of tests, masks, and pills ready if needed.
I cannot promise a new variant won't come. But I can promise you we'll do everything within our power to be ready if it does.
Third, we can end the shutdown of schools and businesses. We have the tools we need.
It's time for Americans to get back to work and fill our great downtowns again. People working from home can feel safe to begin to return to the office.
Were doing that here in the federal government. The vast majority of federal workers will once again work in person.
Our schools are open. Let's keep it that way. Our kids need to be in school.
And with 75% of adult Americans fully vaccinated and hospitalizations down by 77%, most Americans can remove their masks, return to work, stay in the classroom, and move forward safely.
We achieved this because we provided free vaccines, treatments, tests, and masks.
Of course, continuing this costs money.
I will soon send Congress a request.
The vast majority of Americans have used these tools and may want to again, so I expect Congress to pass it quickly.
Fourth, we will continue vaccinating the world.
We've sent 475 Million vaccine doses to 112 countries, more than any other nation.
And we won't stop.
We have lost so much to COVID-19. Time with one another. And worst of all, so much loss of life.
Let's use this moment to reset. Let's stop looking at COVID-19 as a partisan dividing line and see it for what it is: A God-awful disease.
Let's stop seeing each other as enemies, and start seeing each other for who we really are: Fellow Americans.
We can't change how divided we've been. But we can change how we move forward—on COVID-19 and other issues we must face together.
I recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera.
They were responding to a 9-1-1 call when a man shot and killed them with a stolen gun.
Officer Mora was 27 years old.
Officer Rivera was 22.
Both Dominican Americans who'd grown up on the same streets they later chose to patrol as police officers.
I spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves.
I've worked on these issues a long time.
I know what works: Investing in crime prevention and community police officers who'll walk the beat, who'll know the neighborhood, and who can restore trust and safety.
So let's not abandon our streets. Or choose between safety and equal justice.
Let's come together to protect our communities, restore trust, and hold law enforcement accountable.
That's why the Justice Department required body cameras, banned chokeholds, and restricted no-knock warrants for its officers.
That's why the American Rescue Plan provided $350 Billion that cities, states, and counties can use to hire more police and invest in proven strategies like community violence interruption—trusted messengers breaking the cycle of violence and trauma and giving young people hope.
We should all agree: The answer is not to Defund the police. The answer is to FUND the police with the resources and training they need to protect our communities.
I ask Democrats and Republicans alike: Pass my budget and keep our neighborhoods safe.
And I will keep doing everything in my power to crack down on gun trafficking and ghost guns you can buy online and make at home—they have no serial numbers and can't be traced.
And I ask Congress to pass proven measures to reduce gun violence. Pass universal background checks. Why should anyone on a terrorist list be able to purchase a weapon?
Ban assault weapons and high-capacity magazines.
Repeal the liability shield that makes gun manufacturers the only industry in America that can't be sued.
These laws don't infringe on the Second Amendment. They save lives.
The most fundamental right in America is the right to vote and to have it counted. And it's under assault.
In state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections.
We cannot let this happen.
Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you're at it, pass the Disclose Act so Americans can know who is funding our elections.
Tonight, I'd like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service.
One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court.
And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation's top legal minds, who will continue Justice Breyer's legacy of excellence.
A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she's been nominated, she's received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans.
And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system.
We can do both. At our border, we've installed new technology like cutting-edge scanners to better detect drug smuggling.
We've set up joint patrols with Mexico and Guatemala to catch more human traffickers.
We're putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster.
We're securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders.
We can do all this while keeping lit the torch of liberty that has led generations of immigrants to this land—my forefathers and so many of yours.
Provide a pathway to citizenship for Dreamers, those on temporary status, farm workers, and essential workers.
Revise our laws so businesses have the workers they need and families don't wait decades to reunite.
It's not only the right thing to do—it's the economically smart thing to do.
That's why immigration reform is supported by everyone from labor unions to religious leaders to the U.S. Chamber of Commerce.
Let's get it done once and for all.
Advancing liberty and justice also requires protecting the rights of women.
The constitutional right affirmed in Roe v. Wade—standing precedent for half a century—is under attack as never before.
If we want to go forward—not backward—we must protect access to health care. Preserve a woman's right to choose. And let's continue to advance maternal health care in America.
And for our LGBTQ+ Americans, let's finally get the bipartisan Equality Act to my desk. The onslaught of state laws targeting transgender Americans and their families is wrong.
As I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential.
While it often appears that we never agree, that isn't true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice.
And soon, we'll strengthen the Violence Against Women Act that I first wrote three decades ago. It is important for us to show the nation that we can come together and do big things.
So tonight I'm offering a Unity Agenda for the Nation. Four big things we can do together.
First, beat the opioid epidemic.
There is so much we can do. Increase funding for prevention, treatment, harm reduction, and recovery.
Get rid of outdated rules that stop doctors from prescribing treatments. And stop the flow of illicit drugs by working with state and local law enforcement to go after traffickers.
If you're suffering from addiction, know you are not alone. I believe in recovery, and I celebrate the 23 million Americans in recovery.
Second, let's take on mental health. Especially among our children, whose lives and education have been turned upside down.
The American Rescue Plan gave schools money to hire teachers and help students make up for lost learning.
I urge every parent to make sure your school does just that. And we can all play a part—sign up to be a tutor or a mentor.
Children were also struggling before the pandemic. Bullying, violence, trauma, and the harms of social media.
As Frances Haugen, who is here with us tonight, has shown, we must hold social media platforms accountable for the national experiment they're conducting on our children for profit.
It's time to strengthen privacy protections, ban targeted advertising to children, demand tech companies stop collecting personal data on our children.
And let's get all Americans the mental health services they need. More people they can turn to for help, and full parity between physical and mental health care.
Third, support our veterans.
Veterans are the best of us.
I've always believed that we have a sacred obligation to equip all those we send to war and care for them and their families when they come home.
My administration is providing assistance with job training and housing, and now helping lower-income veterans get VA care debt-free.
Our troops in Iraq and Afghanistan faced many dangers.
One was stationed at bases and breathing in toxic smoke from “burn pits” that incinerated wastes of war—medical and hazard material, jet fuel, and more.
When they came home, many of the world's fittest and best trained warriors were never the same.
Headaches. Numbness. Dizziness.
A cancer that would put them in a flag-draped coffin.
I know.
One of those soldiers was my son Major Beau Biden.
We don't know for sure if a burn pit was the cause of his brain cancer, or the diseases of so many of our troops.
But I'm committed to finding out everything we can.
Committed to military families like Danielle Robinson from Ohio.
The widow of Sergeant First Class Heath Robinson.
He was born a soldier. Army National Guard. Combat medic in Kosovo and Iraq.
Stationed near Baghdad, just yards from burn pits the size of football fields.
Heath's widow Danielle is here with us tonight. They loved going to Ohio State football games. He loved building Legos with their daughter.
But cancer from prolonged exposure to burn pits ravaged Heath's lungs and body.
Danielle says Heath was a fighter to the very end.
He didn't know how to stop fighting, and neither did she.
Through her pain she found purpose to demand we do better.
Tonight, Danielle—we are.
The VA is pioneering new ways of linking toxic exposures to diseases, already helping more veterans get benefits.
And tonight, I'm announcing we're expanding eligibility to veterans suffering from nine respiratory cancers.
I'm also calling on Congress: pass a law to make sure veterans devastated by toxic exposures in Iraq and Afghanistan finally get the benefits and comprehensive health care they deserve.
And fourth, let's end cancer as we know it.
This is personal to me and Jill, to Kamala, and to so many of you.
Cancer is the #2 cause of death in America, second only to heart disease.
Last month, I announced our plan to supercharge
the Cancer Moonshot that President Obama asked me to lead six years ago.
Our goal is to cut the cancer death rate by at least 50% over the next 25 years, turn more cancers from death sentences into treatable diseases.
More support for patients and families.
To get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health.
Its based on DARPA—the Defense Department project that led to the Internet, GPS, and so much more.
ARPA-H will have a singular purpose—to drive breakthroughs in cancer, Alzheimer's, diabetes, and more.
A unity agenda for the nation.
We can do this.
My fellow Americans—tonight, we have gathered in a sacred space—the citadel of our democracy.
In this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things.
We have fought for freedom, expanded liberty, defeated totalitarianism and terror.
And built the strongest, freest, and most prosperous nation the world has ever known.
Now is the hour.
Our moment of responsibility.
Our test of resolve and conscience, of history itself.
It is in this moment that our character is formed. Our purpose is found. Our future is forged.
Well I know this nation.
We will meet the test.
To protect freedom and liberty, to expand fairness and opportunity.
We will save democracy.
As hard as these times have been, I am more optimistic about America today than I have been my whole life.
Because I see the future that is within our grasp.
Because I know there is simply nothing beyond our capacity.
We are the only nation on Earth that has always turned every crisis we have faced into an opportunity.
The only nation that can be defined by a single word: possibilities.
So on this night, in our 245th year as a nation, I have come to report on the State of the Union.
And my report is this: the State of the Union is strong—because you, the American people, are strong.
We are stronger today than we were a year ago.
And we will be stronger a year from now than we are today.
Now is our moment to meet and overcome the challenges of our time.
And we will, as one people.
One America.
The United States of America.
May God bless you all. May God protect our troops.

View File

@@ -7,9 +7,7 @@
"source": [
"# Images\n",
"\n",
"This covers how to load images into a document format that we can use downstream with other LangChain modules.\n",
"\n",
"It uses [Unstructured](https://unstructured.io/) to handle a wide variety of image formats, such as `.jpg` and `.png`. Please see [this guide](/docs/integrations/providers/unstructured/) for more instructions on setting up Unstructured locally, including setting up required system dependencies."
"This covers how to load images such as `JPG` or `PNG` into a document format that we can use downstream."
]
},
{
@@ -29,35 +27,63 @@
},
"outputs": [],
"source": [
"%pip install --upgrade --quiet \"unstructured[all-docs]\""
"%pip install --upgrade --quiet pdfminer"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "0cc0cd42",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain_community.document_loaders.image import UnstructuredImageLoader"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "0cc0cd42",
"id": "082d557c",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"loader = UnstructuredImageLoader(\"layout-parser-paper-fast.jpg\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "df11c953",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"data = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "4284d44c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Document(page_content='2021\\n\\n2103.15348v2 [cs.CV] 21 Jun\\n\\narXiv\\n\\nLayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis\\n\\nZejiang Shen! (&4), Ruochen Zhang?, Melissa Dell?, Benjamin Charles Germain Lee*, Jacob Carlson?, and Weining Li?\\n\\n1\\n\\nAllen Institute for AI shannons@allenai.org ? Brown University ruochen_zhang@brown. edu 3 Harvard University {melissadell, jacob_carlson}@fas.harvard.edu 4 University of Washington begl@cs.washington.edu 5 University of Waterloo w4221i@uwaterloo.ca\\n\\nAbstract. Recent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of im- portant innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applica- tions. The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout de- tection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digiti- zation pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-word use cases. The library is publicly available at https: //layout-parser.github. io.\\n\\nKeywords: Document Image Analysis - Deep Learning - Layout Analysis - Character Recognition - Open Source library - Toolkit.\\n\\n1 Introduction\\n\\nDeep Learning(DL)-based approaches are the state-of-the-art for a wide range of document image analysis (DIA) tasks including document image classification [11,', metadata={'source': './example_data/layout-parser-paper-screenshot.png'})"
"Document(page_content=\"LayoutParser: A Unified Toolkit for Deep\\nLearning Based Document Image Analysis\\n\\n\\nZxjiang Shen' (F3}, Ruochen Zhang, Melissa Dell*, Benjamin Charles Germain\\nLeet, Jacob Carlson, and Weining LiF\\n\\n\\nsugehen\\n\\nshangthrows, et\\n\\nAbstract. Recent advanocs in document image analysis (DIA) have been\\npimarliy driven bythe application of neural networks dell roar\\n{uteomer could be aly deployed in production and extended fo farther\\n[nvetigtion. However, various factory ke lcely organize codebanee\\nsnd sophisticated modal cnigurations compat the ey ree of\\nerin! innovation by wide sence, Though there have been sng\\nHors to improve reuablty and simplify deep lees (DL) mode\\naon, sone of them ae optimized for challenge inthe demain of DIA,\\nThis roprscte a major gap in the extng fol, sw DIA i eal to\\nscademic research acon wie range of dpi in the social ssencee\\n[rary for streamlining the sage of DL in DIA research and appicn\\ntons The core LayoutFaraer brary comes with a sch of simple and\\nIntative interfaee or applying and eutomiing DI. odel fr Inyo de\\npltfom for sharing both protrined modes an fal document dist\\n{ation pipeline We demonutate that LayootPareer shea fr both\\nlightweight and lrgeseledgtieation pipelines in eal-word uae ces\\nThe leary pblely smal at Btspe://layost-pareergsthab So\\n\\n\\n\\nKeywords: Document Image Analysis» Deep Learning Layout Analysis\\nCharacter Renguition - Open Serres dary « Tol\\n\\n\\nIntroduction\\n\\n\\nDeep Learning(DL)-based approaches are the state-of-the-art for a wide range of\\ndoctiment image analysis (DIA) tea including document image clasiffeation [I]\\n\", lookup_str='', metadata={'source': 'layout-parser-paper-fast.jpg'}, lookup_index=0)"
]
},
"execution_count": 2,
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_community.document_loaders.image import UnstructuredImageLoader\n",
"\n",
"loader = UnstructuredImageLoader(\"./example_data/layout-parser-paper-screenshot.png\")\n",
"\n",
"data = loader.load()\n",
"\n",
"data[0]"
]
},
@@ -68,33 +94,47 @@
"source": [
"### Retain Elements\n",
"\n",
"Under the hood, Unstructured creates different \"elements\" for different chunks of text. By default we combine those together, but you can keep that separation by specifying `mode=\"elements\"`."
"Under the hood, Unstructured creates different \"elements\" for different chunks of text. By default we combine those together, but you can easily keep that separation by specifying `mode=\"elements\"`."
]
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 5,
"id": "0fab833b",
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredImageLoader(\"layout-parser-paper-fast.jpg\", mode=\"elements\")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "c3e8ff1b",
"metadata": {},
"outputs": [],
"source": [
"data = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "43c23d2d",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Document(page_content='2021', metadata={'source': './example_data/layout-parser-paper-screenshot.png', 'coordinates': {'points': ((47.0, 492.0), (47.0, 591.0), (83.0, 591.0), (83.0, 492.0)), 'system': 'PixelSpace', 'layout_width': 1624, 'layout_height': 1920}, 'last_modified': '2024-07-01T10:38:29', 'filetype': 'PNG', 'languages': ['eng'], 'page_number': 1, 'file_directory': './example_data', 'filename': 'layout-parser-paper-screenshot.png', 'category': 'UncategorizedText'})"
"Document(page_content='LayoutParser: A Unified Toolkit for Deep\\nLearning Based Document Image Analysis\\n', lookup_str='', metadata={'source': 'layout-parser-paper-fast.jpg', 'filename': 'layout-parser-paper-fast.jpg', 'page_number': 1, 'category': 'Title'}, lookup_index=0)"
]
},
"execution_count": 3,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"loader = UnstructuredImageLoader(\n",
" \"./example_data/layout-parser-paper-screenshot.png\", mode=\"elements\"\n",
")\n",
"\n",
"data = loader.load()\n",
"\n",
"data[0]"
]
}
@@ -115,7 +155,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.10.6"
}
},
"nbformat": 4,

View File

@@ -7,19 +7,17 @@
"source": [
"# Microsoft Excel\n",
"\n",
"The `UnstructuredExcelLoader` is used to load `Microsoft Excel` files. The loader works with both `.xlsx` and `.xls` files. The page content will be the raw text of the Excel file. If you use the loader in `\"elements\"` mode, an HTML representation of the Excel file will be available in the document metadata under the `text_as_html` key.\n",
"\n",
"Please see [this guide](/docs/integrations/providers/unstructured/) for more instructions on setting up Unstructured locally, including setting up required system dependencies."
"The `UnstructuredExcelLoader` is used to load `Microsoft Excel` files. The loader works with both `.xlsx` and `.xls` files. The page content will be the raw text of the Excel file. If you use the loader in `\"elements\"` mode, an HTML representation of the Excel file will be available in the document metadata under the `text_as_html` key."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0b01ee46",
"execution_count": 1,
"id": "e6616e3a",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain-community unstructured openpyxl"
"from langchain_community.document_loaders import UnstructuredExcelLoader"
]
},
{
@@ -28,20 +26,10 @@
"id": "a654e4d9",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"4\n"
]
},
{
"data": {
"text/plain": [
"[Document(page_content='Stanley Cups', metadata={'source': './example_data/stanley-cups.xlsx', 'file_directory': './example_data', 'filename': 'stanley-cups.xlsx', 'last_modified': '2023-12-19T13:42:18', 'page_name': 'Stanley Cups', 'page_number': 1, 'languages': ['eng'], 'filetype': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'category': 'Title'}),\n",
" Document(page_content='\\n\\n\\nTeam\\nLocation\\nStanley Cups\\n\\n\\nBlues\\nSTL\\n1\\n\\n\\nFlyers\\nPHI\\n2\\n\\n\\nMaple Leafs\\nTOR\\n13\\n\\n\\n', metadata={'source': './example_data/stanley-cups.xlsx', 'file_directory': './example_data', 'filename': 'stanley-cups.xlsx', 'last_modified': '2023-12-19T13:42:18', 'page_name': 'Stanley Cups', 'page_number': 1, 'text_as_html': '<table border=\"1\" class=\"dataframe\">\\n <tbody>\\n <tr>\\n <td>Team</td>\\n <td>Location</td>\\n <td>Stanley Cups</td>\\n </tr>\\n <tr>\\n <td>Blues</td>\\n <td>STL</td>\\n <td>1</td>\\n </tr>\\n <tr>\\n <td>Flyers</td>\\n <td>PHI</td>\\n <td>2</td>\\n </tr>\\n <tr>\\n <td>Maple Leafs</td>\\n <td>TOR</td>\\n <td>13</td>\\n </tr>\\n </tbody>\\n</table>', 'languages': ['eng'], 'parent_id': '17e9a90f9616f2abed8cf32b5bd3810d', 'filetype': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'category': 'Table'}),\n",
" Document(page_content='Stanley Cups Since 67', metadata={'source': './example_data/stanley-cups.xlsx', 'file_directory': './example_data', 'filename': 'stanley-cups.xlsx', 'last_modified': '2023-12-19T13:42:18', 'page_name': 'Stanley Cups Since 67', 'page_number': 2, 'languages': ['eng'], 'filetype': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'category': 'Title'}),\n",
" Document(page_content='\\n\\n\\nTeam\\nLocation\\nStanley Cups\\n\\n\\nBlues\\nSTL\\n1\\n\\n\\nFlyers\\nPHI\\n2\\n\\n\\nMaple Leafs\\nTOR\\n0\\n\\n\\n', metadata={'source': './example_data/stanley-cups.xlsx', 'file_directory': './example_data', 'filename': 'stanley-cups.xlsx', 'last_modified': '2023-12-19T13:42:18', 'page_name': 'Stanley Cups Since 67', 'page_number': 2, 'text_as_html': '<table border=\"1\" class=\"dataframe\">\\n <tbody>\\n <tr>\\n <td>Team</td>\\n <td>Location</td>\\n <td>Stanley Cups</td>\\n </tr>\\n <tr>\\n <td>Blues</td>\\n <td>STL</td>\\n <td>1</td>\\n </tr>\\n <tr>\\n <td>Flyers</td>\\n <td>PHI</td>\\n <td>2</td>\\n </tr>\\n <tr>\\n <td>Maple Leafs</td>\\n <td>TOR</td>\\n <td>0</td>\\n </tr>\\n </tbody>\\n</table>', 'languages': ['eng'], 'parent_id': 'ee34bd8c186b57e3530d5443ffa58122', 'filetype': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'category': 'Table'})]"
"Document(page_content='\\n \\n \\n Team\\n Location\\n Stanley Cups\\n \\n \\n Blues\\n STL\\n 1\\n \\n \\n Flyers\\n PHI\\n 2\\n \\n \\n Maple Leafs\\n TOR\\n 13\\n \\n \\n', metadata={'source': 'example_data/stanley-cups.xlsx', 'filename': 'stanley-cups.xlsx', 'file_directory': 'example_data', 'filetype': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'page_number': 1, 'page_name': 'Stanley Cups', 'text_as_html': '<table border=\"1\" class=\"dataframe\">\\n <tbody>\\n <tr>\\n <td>Team</td>\\n <td>Location</td>\\n <td>Stanley Cups</td>\\n </tr>\\n <tr>\\n <td>Blues</td>\\n <td>STL</td>\\n <td>1</td>\\n </tr>\\n <tr>\\n <td>Flyers</td>\\n <td>PHI</td>\\n <td>2</td>\\n </tr>\\n <tr>\\n <td>Maple Leafs</td>\\n <td>TOR</td>\\n <td>13</td>\\n </tr>\\n </tbody>\\n</table>', 'category': 'Table'})"
]
},
"execution_count": 2,
@@ -50,14 +38,9 @@
}
],
"source": [
"from langchain_community.document_loaders import UnstructuredExcelLoader\n",
"\n",
"loader = UnstructuredExcelLoader(\"./example_data/stanley-cups.xlsx\", mode=\"elements\")\n",
"loader = UnstructuredExcelLoader(\"example_data/stanley-cups.xlsx\", mode=\"elements\")\n",
"docs = loader.load()\n",
"\n",
"print(len(docs))\n",
"\n",
"docs"
"docs[0]"
]
},
{
@@ -93,7 +76,7 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-community azure-ai-documentintelligence"
"%pip install --upgrade --quiet langchain langchain-community azure-ai-documentintelligence"
]
},
{
@@ -132,7 +115,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.8.13"
}
},
"nbformat": 4,

View File

@@ -9,9 +9,7 @@
"\n",
">[Microsoft PowerPoint](https://en.wikipedia.org/wiki/Microsoft_PowerPoint) is a presentation program by Microsoft.\n",
"\n",
"This covers how to load `Microsoft PowerPoint` documents into a document format that we can use downstream.\n",
"\n",
"Please see [this guide](/docs/integrations/providers/unstructured/) for more instructions on setting up Unstructured locally, including setting up required system dependencies."
"This covers how to load `Microsoft PowerPoint` documents into a document format that we can use downstream."
]
},
{
@@ -29,30 +27,60 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"id": "721c48aa",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain_community.document_loaders import UnstructuredPowerPointLoader"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "9d3d0e35",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"loader = UnstructuredPowerPointLoader(\"example_data/fake-power-point.pptx\")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "06073f91",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"data = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "c9adc5cb",
"metadata": {
"tags": []
},
"outputs": [
{
"data": {
"text/plain": [
"[Document(page_content='Adding a Bullet Slide\\n\\nFind the bullet slide layout\\n\\nUse _TextFrame.text for first bullet\\n\\nUse _TextFrame.add_paragraph() for subsequent bullets\\n\\nHere is a lot of text!\\n\\nHere is some text in a text box!', metadata={'source': './example_data/fake-power-point.pptx'})]"
"[Document(page_content='Adding a Bullet Slide\\n\\nFind the bullet slide layout\\n\\nUse _TextFrame.text for first bullet\\n\\nUse _TextFrame.add_paragraph() for subsequent bullets\\n\\nHere is a lot of text!\\n\\nHere is some text in a text box!', metadata={'source': 'example_data/fake-power-point.pptx'})]"
]
},
"execution_count": 2,
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_community.document_loaders import UnstructuredPowerPointLoader\n",
"\n",
"loader = UnstructuredPowerPointLoader(\"./example_data/fake-power-point.pptx\")\n",
"\n",
"data = loader.load()\n",
"\n",
"data"
]
},
@@ -68,14 +96,36 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 2,
"id": "064f9162",
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredPowerPointLoader(\n",
" \"example_data/fake-power-point.pptx\", mode=\"elements\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "abefbbdb",
"metadata": {},
"outputs": [],
"source": [
"data = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "a547c534",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Document(page_content='Adding a Bullet Slide', metadata={'source': './example_data/fake-power-point.pptx', 'category_depth': 0, 'file_directory': './example_data', 'filename': 'fake-power-point.pptx', 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'languages': ['eng'], 'filetype': 'application/vnd.openxmlformats-officedocument.presentationml.presentation', 'category': 'Title'})"
"Document(page_content='Adding a Bullet Slide', lookup_str='', metadata={'source': 'example_data/fake-power-point.pptx'}, lookup_index=0)"
]
},
"execution_count": 4,
@@ -84,12 +134,6 @@
}
],
"source": [
"loader = UnstructuredPowerPointLoader(\n",
" \"./example_data/fake-power-point.pptx\", mode=\"elements\"\n",
")\n",
"\n",
"data = loader.load()\n",
"\n",
"data[0]"
]
},
@@ -165,7 +209,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.10.6"
}
},
"nbformat": 4,

View File

@@ -24,7 +24,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"id": "7b80ea891",
"metadata": {},
"outputs": [],
@@ -34,45 +34,38 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 3,
"id": "7b80ea89",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[Document(page_content='Lorem ipsum dolor sit amet.', metadata={'source': './example_data/fake.docx'})]"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"outputs": [],
"source": [
"from langchain_community.document_loaders import Docx2txtLoader\n",
"\n",
"loader = Docx2txtLoader(\"./example_data/fake.docx\")\n",
"\n",
"data = loader.load()\n",
"\n",
"data"
]
},
{
"cell_type": "markdown",
"id": "8d40727d",
"metadata": {},
"source": [
"## Using Unstructured\n",
"\n",
"Please see [this guide](/docs/integrations/providers/unstructured/) for more instructions on setting up Unstructured locally, including setting up required system dependencies."
"from langchain_community.document_loaders import Docx2txtLoader"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "721c48aa",
"execution_count": 5,
"id": "99a12031",
"metadata": {},
"outputs": [],
"source": [
"loader = Docx2txtLoader(\"example_data/fake.docx\")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "b92f68b0",
"metadata": {},
"outputs": [],
"source": [
"data = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "d83dd755",
"metadata": {},
"outputs": [
{
@@ -81,18 +74,71 @@
"[Document(page_content='Lorem ipsum dolor sit amet.', metadata={'source': 'example_data/fake.docx'})]"
]
},
"execution_count": 3,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"data"
]
},
{
"cell_type": "markdown",
"id": "8d40727d",
"metadata": {},
"source": [
"## Using Unstructured"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "721c48aa",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders import UnstructuredWordDocumentLoader"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "9d3d0e35",
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredWordDocumentLoader(\"example_data/fake.docx\")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "06073f91",
"metadata": {},
"outputs": [],
"source": [
"data = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "c9adc5cb",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[Document(page_content='Lorem ipsum dolor sit amet.', lookup_str='', metadata={'source': 'fake.docx'}, lookup_index=0)]"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_community.document_loaders import UnstructuredWordDocumentLoader\n",
"\n",
"loader = UnstructuredWordDocumentLoader(\"example_data/fake.docx\")\n",
"\n",
"data = loader.load()\n",
"\n",
"data"
]
},
@@ -111,23 +157,39 @@
"execution_count": 5,
"id": "064f9162",
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredWordDocumentLoader(\"example_data/fake.docx\", mode=\"elements\")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "abefbbdb",
"metadata": {},
"outputs": [],
"source": [
"data = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "a547c534",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Document(page_content='Lorem ipsum dolor sit amet.', metadata={'source': './example_data/fake.docx', 'category_depth': 0, 'file_directory': './example_data', 'filename': 'fake.docx', 'last_modified': '2023-12-19T13:42:18', 'languages': ['por', 'cat'], 'filetype': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'category': 'Title'})"
"Document(page_content='Lorem ipsum dolor sit amet.', lookup_str='', metadata={'source': 'fake.docx', 'filename': 'fake.docx', 'category': 'Title'}, lookup_index=0)"
]
},
"execution_count": 5,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"loader = UnstructuredWordDocumentLoader(\"./example_data/fake.docx\", mode=\"elements\")\n",
"\n",
"data = loader.load()\n",
"\n",
"data[0]"
]
},
@@ -201,7 +263,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.10.6"
}
},
"nbformat": 4,

View File

@@ -19,21 +19,29 @@
"execution_count": 1,
"id": "e6616e3a",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders import UnstructuredODTLoader"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "a654e4d9",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Document(page_content='Lorem ipsum dolor sit amet.', metadata={'source': 'example_data/fake.odt', 'category_depth': 0, 'file_directory': 'example_data', 'filename': 'fake.odt', 'last_modified': '2023-12-19T13:42:18', 'languages': ['por', 'cat'], 'filetype': 'application/vnd.oasis.opendocument.text', 'category': 'Title'})"
"Document(page_content='Lorem ipsum dolor sit amet.', metadata={'source': 'example_data/fake.odt', 'filename': 'example_data/fake.odt', 'category': 'Title'})"
]
},
"execution_count": 1,
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_community.document_loaders import UnstructuredODTLoader\n",
"\n",
"loader = UnstructuredODTLoader(\"example_data/fake.odt\", mode=\"elements\")\n",
"docs = loader.load()\n",
"docs[0]"
@@ -64,7 +72,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.10.6"
}
},
"nbformat": 4,

View File

@@ -22,23 +22,35 @@
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders import UnstructuredOrgModeLoader"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredOrgModeLoader(file_path=\"example_data/README.org\", mode=\"elements\")\n",
"docs = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"page_content='Example Docs' metadata={'source': './example_data/README.org', 'category_depth': 0, 'last_modified': '2023-12-19T13:42:18', 'languages': ['eng'], 'filetype': 'text/org', 'file_directory': './example_data', 'filename': 'README.org', 'category': 'Title'}\n"
"page_content='Example Docs' metadata={'source': 'example_data/README.org', 'filename': 'README.org', 'file_directory': 'example_data', 'filetype': 'text/org', 'page_number': 1, 'category': 'Title'}\n"
]
}
],
"source": [
"from langchain_community.document_loaders import UnstructuredOrgModeLoader\n",
"\n",
"loader = UnstructuredOrgModeLoader(\n",
" file_path=\"./example_data/README.org\", mode=\"elements\"\n",
")\n",
"docs = loader.load()\n",
"\n",
"print(docs[0])"
]
},
@@ -66,7 +78,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.8.13"
}
},
"nbformat": 4,

View File

@@ -22,21 +22,35 @@
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders import UnstructuredRSTLoader"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredRSTLoader(file_path=\"example_data/README.rst\", mode=\"elements\")\n",
"docs = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"page_content='Example Docs' metadata={'source': './example_data/README.rst', 'category_depth': 0, 'last_modified': '2023-12-19T13:42:18', 'languages': ['eng'], 'filetype': 'text/x-rst', 'file_directory': './example_data', 'filename': 'README.rst', 'category': 'Title'}\n"
"page_content='Example Docs' metadata={'source': 'example_data/README.rst', 'filename': 'README.rst', 'file_directory': 'example_data', 'filetype': 'text/x-rst', 'page_number': 1, 'category': 'Title'}\n"
]
}
],
"source": [
"from langchain_community.document_loaders import UnstructuredRSTLoader\n",
"\n",
"loader = UnstructuredRSTLoader(file_path=\"./example_data/README.rst\", mode=\"elements\")\n",
"docs = loader.load()\n",
"\n",
"print(docs[0])"
]
},
@@ -64,7 +78,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.11.3"
}
},
"nbformat": 4,

View File

@@ -22,6 +22,27 @@
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders.tsv import UnstructuredTSVLoader"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredTSVLoader(\n",
" file_path=\"example_data/mlb_teams_2012.csv\", mode=\"elements\"\n",
")\n",
"docs = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
@@ -30,9 +51,6 @@
"<table border=\"1\" class=\"dataframe\">\n",
" <tbody>\n",
" <tr>\n",
" <td>Team, \"Payroll (millions)\", \"Wins\"</td>\n",
" </tr>\n",
" <tr>\n",
" <td>Nationals, 81.34, 98</td>\n",
" </tr>\n",
" <tr>\n",
@@ -128,13 +146,6 @@
}
],
"source": [
"from langchain_community.document_loaders.tsv import UnstructuredTSVLoader\n",
"\n",
"loader = UnstructuredTSVLoader(\n",
" file_path=\"./example_data/mlb_teams_2012.csv\", mode=\"elements\"\n",
")\n",
"docs = loader.load()\n",
"\n",
"print(docs[0].metadata[\"text_as_html\"])"
]
},
@@ -162,7 +173,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.8.13"
}
},
"nbformat": 4,

View File

@@ -7,31 +7,18 @@
"source": [
"# Unstructured File\n",
"\n",
"This notebook covers how to use `Unstructured` package to load files of many types. `Unstructured` currently supports loading of text files, powerpoints, html, pdfs, images, and more.\n",
"\n",
"Please see [this guide](/docs/integrations/providers/unstructured/) for more instructions on setting up Unstructured locally, including setting up required system dependencies."
"This notebook covers how to use `Unstructured` package to load files of many types. `Unstructured` currently supports loading of text files, powerpoints, html, pdfs, images, and more."
]
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"id": "2886982e",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m24.0\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.1.1\u001b[0m\n",
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n",
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"outputs": [],
"source": [
"# # Install package\n",
"%pip install --upgrade --quiet \"unstructured[all-docs]\""
"%pip install --upgrade --quiet \"unstructured[all-docs]\""
]
},
{
@@ -64,9 +51,39 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 2,
"id": "79d3e549",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders import UnstructuredFileLoader"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "2593d1dc",
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredFileLoader(\"./example_data/state_of_the_union.txt\")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "fe34e941",
"metadata": {},
"outputs": [],
"source": [
"docs = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "ee449788",
"metadata": {},
"outputs": [
{
"data": {
@@ -74,18 +91,12 @@
"'Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans.\\n\\nLast year COVID-19 kept us apart. This year we are finally together again.\\n\\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans.\\n\\nWith a duty to one another to the American people to the Constit'"
]
},
"execution_count": 3,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_community.document_loaders import UnstructuredFileLoader\n",
"\n",
"loader = UnstructuredFileLoader(\"./example_data/state_of_the_union.txt\")\n",
"\n",
"docs = loader.load()\n",
"\n",
"docs[0].page_content[:400]"
]
},
@@ -99,28 +110,41 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 1,
"id": "092d9a0b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'1/22/23, 6:30 PM - User 1: Hi! Im interested in your bag. Im offering $50. Let me know if you are interested. Thanks!\\n\\n1/22/23, 8:24 PM - User 2: Goodmorning! $50 is too low.\\n\\n1/23/23, 2:59 AM - User 1: How much do you want?\\n\\n1/23/23, 3:00 AM - User 2: Online is at least $100\\n\\n1/23/23, 3:01 AM - User 2: Here is $129\\n\\n1/23/23, 3:01 AM - User 2: <Media omitted>\\n\\n1/23/23, 3:01 AM - User 1: Im not int'"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"outputs": [],
"source": [
"files = [\"./example_data/whatsapp_chat.txt\", \"./example_data/layout-parser-paper.pdf\"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f841c4f8",
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredFileLoader(files)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "993c240b",
"metadata": {},
"outputs": [],
"source": [
"docs = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5ce4ff07",
"metadata": {},
"outputs": [],
"source": [
"files = [\"./example_data/whatsapp_chat.txt\", \"./example_data/layout-parser-paper.pdf\"]\n",
"\n",
"loader = UnstructuredFileLoader(files)\n",
"\n",
"docs = loader.load()\n",
"\n",
"docs[0].page_content[:400]"
]
},
@@ -136,32 +160,48 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 8,
"id": "ff5b616d",
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredFileLoader(\n",
" \"./example_data/state_of_the_union.txt\", mode=\"elements\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "feca3b6c",
"metadata": {},
"outputs": [],
"source": [
"docs = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "fec5bbac",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[Document(page_content='Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans.', metadata={'source': './example_data/state_of_the_union.txt', 'file_directory': './example_data', 'filename': 'state_of_the_union.txt', 'last_modified': '2024-07-01T11:18:22', 'languages': ['eng'], 'filetype': 'text/plain', 'category': 'NarrativeText'}),\n",
" Document(page_content='Last year COVID-19 kept us apart. This year we are finally together again.', metadata={'source': './example_data/state_of_the_union.txt', 'file_directory': './example_data', 'filename': 'state_of_the_union.txt', 'last_modified': '2024-07-01T11:18:22', 'languages': ['eng'], 'filetype': 'text/plain', 'category': 'NarrativeText'}),\n",
" Document(page_content='Tonight, we meet as Democrats Republicans and Independents. But most importantly as Americans.', metadata={'source': './example_data/state_of_the_union.txt', 'file_directory': './example_data', 'filename': 'state_of_the_union.txt', 'last_modified': '2024-07-01T11:18:22', 'languages': ['eng'], 'filetype': 'text/plain', 'category': 'NarrativeText'}),\n",
" Document(page_content='With a duty to one another to the American people to the Constitution.', metadata={'source': './example_data/state_of_the_union.txt', 'file_directory': './example_data', 'filename': 'state_of_the_union.txt', 'last_modified': '2024-07-01T11:18:22', 'languages': ['eng'], 'filetype': 'text/plain', 'category': 'UncategorizedText'}),\n",
" Document(page_content='And with an unwavering resolve that freedom will always triumph over tyranny.', metadata={'source': './example_data/state_of_the_union.txt', 'file_directory': './example_data', 'filename': 'state_of_the_union.txt', 'last_modified': '2024-07-01T11:18:22', 'languages': ['eng'], 'filetype': 'text/plain', 'category': 'NarrativeText'})]"
"[Document(page_content='Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans.', lookup_str='', metadata={'source': '../../state_of_the_union.txt'}, lookup_index=0),\n",
" Document(page_content='Last year COVID-19 kept us apart. This year we are finally together again.', lookup_str='', metadata={'source': '../../state_of_the_union.txt'}, lookup_index=0),\n",
" Document(page_content='Tonight, we meet as Democrats Republicans and Independents. But most importantly as Americans.', lookup_str='', metadata={'source': '../../state_of_the_union.txt'}, lookup_index=0),\n",
" Document(page_content='With a duty to one another to the American people to the Constitution.', lookup_str='', metadata={'source': '../../state_of_the_union.txt'}, lookup_index=0),\n",
" Document(page_content='And with an unwavering resolve that freedom will always triumph over tyranny.', lookup_str='', metadata={'source': '../../state_of_the_union.txt'}, lookup_index=0)]"
]
},
"execution_count": 5,
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"loader = UnstructuredFileLoader(\n",
" \"./example_data/state_of_the_union.txt\", mode=\"elements\"\n",
")\n",
"\n",
"docs = loader.load()\n",
"\n",
"docs[:5]"
]
},
@@ -177,35 +217,59 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 1,
"id": "767238a4",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders import UnstructuredFileLoader"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "9518b425",
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredFileLoader(\n",
" \"layout-parser-paper-fast.pdf\", strategy=\"fast\", mode=\"elements\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "645f29e9",
"metadata": {},
"outputs": [],
"source": [
"docs = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "60685353",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[Document(page_content='2 v 8 4 3 5 1 . 3 0 1 2 : v i X r a', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((16.34, 393.9), (16.34, 560.0), (36.34, 560.0), (36.34, 393.9)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': '89565df026a24279aaea20dc08cedbec', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
" Document(page_content='LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((157.62199999999999, 114.23496279999995), (157.62199999999999, 146.5141628), (457.7358962799999, 146.5141628), (457.7358962799999, 114.23496279999995)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'filetype': 'application/pdf', 'category': 'Title'}),\n",
" Document(page_content='Zejiang Shen1 ((cid:0)), Ruochen Zhang2, Melissa Dell3, Benjamin Charles Germain Lee4, Jacob Carlson3, and Weining Li5', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((134.809, 168.64029940800003), (134.809, 192.2517444), (480.5464199080001, 192.2517444), (480.5464199080001, 168.64029940800003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
" Document(page_content='1 Allen Institute for AI shannons@allenai.org 2 Brown University ruochen zhang@brown.edu 3 Harvard University {melissadell,jacob carlson}@fas.harvard.edu 4 University of Washington bcgl@cs.washington.edu 5 University of Waterloo w422li@uwaterloo.ca', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((207.23000000000002, 202.57205439999996), (207.23000000000002, 311.8195408), (408.12676, 311.8195408), (408.12676, 202.57205439999996)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
" Document(page_content='Abstract. Recent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of im- portant innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applica- tions. The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout de- tection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digiti- zation pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-word use cases. The library is publicly available at https://layout-parser.github.io.', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((162.779, 338.45008160000003), (162.779, 566.8455408), (454.0372021523199, 566.8455408), (454.0372021523199, 338.45008160000003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'links': [{'text': ':// layout - parser . github . io', 'url': 'https://layout-parser.github.io', 'start_index': 1477}], 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'NarrativeText'})]"
"[Document(page_content='1', lookup_str='', metadata={'source': 'layout-parser-paper-fast.pdf', 'filename': 'layout-parser-paper-fast.pdf', 'page_number': 1, 'category': 'UncategorizedText'}, lookup_index=0),\n",
" Document(page_content='2', lookup_str='', metadata={'source': 'layout-parser-paper-fast.pdf', 'filename': 'layout-parser-paper-fast.pdf', 'page_number': 1, 'category': 'UncategorizedText'}, lookup_index=0),\n",
" Document(page_content='0', lookup_str='', metadata={'source': 'layout-parser-paper-fast.pdf', 'filename': 'layout-parser-paper-fast.pdf', 'page_number': 1, 'category': 'UncategorizedText'}, lookup_index=0),\n",
" Document(page_content='2', lookup_str='', metadata={'source': 'layout-parser-paper-fast.pdf', 'filename': 'layout-parser-paper-fast.pdf', 'page_number': 1, 'category': 'UncategorizedText'}, lookup_index=0),\n",
" Document(page_content='n', lookup_str='', metadata={'source': 'layout-parser-paper-fast.pdf', 'filename': 'layout-parser-paper-fast.pdf', 'page_number': 1, 'category': 'Title'}, lookup_index=0)]"
]
},
"execution_count": 9,
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_community.document_loaders import UnstructuredFileLoader\n",
"\n",
"loader = UnstructuredFileLoader(\n",
" \"./example_data/layout-parser-paper.pdf\", strategy=\"fast\", mode=\"elements\"\n",
")\n",
"\n",
"docs = loader.load()\n",
"\n",
"docs[5:10]"
"docs[:5]"
]
},
{
@@ -223,33 +287,59 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 1,
"id": "8ca8a648",
"metadata": {},
"outputs": [],
"source": [
"!wget https://raw.githubusercontent.com/Unstructured-IO/unstructured/main/example-docs/layout-parser-paper.pdf -P \"../../\""
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "686e5eb4",
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredFileLoader(\n",
" \"./example_data/layout-parser-paper.pdf\", mode=\"elements\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c90f0e94",
"metadata": {},
"outputs": [],
"source": [
"docs = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "6ec859d8",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[Document(page_content='2 v 8 4 3 5 1 . 3 0 1 2 : v i X r a', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((16.34, 393.9), (16.34, 560.0), (36.34, 560.0), (36.34, 393.9)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': '89565df026a24279aaea20dc08cedbec', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
" Document(page_content='LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((157.62199999999999, 114.23496279999995), (157.62199999999999, 146.5141628), (457.7358962799999, 146.5141628), (457.7358962799999, 114.23496279999995)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'filetype': 'application/pdf', 'category': 'Title'}),\n",
" Document(page_content='Zejiang Shen1 ((cid:0)), Ruochen Zhang2, Melissa Dell3, Benjamin Charles Germain Lee4, Jacob Carlson3, and Weining Li5', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((134.809, 168.64029940800003), (134.809, 192.2517444), (480.5464199080001, 192.2517444), (480.5464199080001, 168.64029940800003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
" Document(page_content='1 Allen Institute for AI shannons@allenai.org 2 Brown University ruochen zhang@brown.edu 3 Harvard University {melissadell,jacob carlson}@fas.harvard.edu 4 University of Washington bcgl@cs.washington.edu 5 University of Waterloo w422li@uwaterloo.ca', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((207.23000000000002, 202.57205439999996), (207.23000000000002, 311.8195408), (408.12676, 311.8195408), (408.12676, 202.57205439999996)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
" Document(page_content='Abstract. Recent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of im- portant innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applica- tions. The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout de- tection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digiti- zation pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-word use cases. The library is publicly available at https://layout-parser.github.io.', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((162.779, 338.45008160000003), (162.779, 566.8455408), (454.0372021523199, 566.8455408), (454.0372021523199, 338.45008160000003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'links': [{'text': ':// layout - parser . github . io', 'url': 'https://layout-parser.github.io', 'start_index': 1477}], 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'NarrativeText'})]"
"[Document(page_content='LayoutParser : A Unified Toolkit for Deep Learning Based Document Image Analysis', lookup_str='', metadata={'source': '../../layout-parser-paper.pdf'}, lookup_index=0),\n",
" Document(page_content='Zejiang Shen 1 ( (ea)\\n ), Ruochen Zhang 2 , Melissa Dell 3 , Benjamin Charles Germain Lee 4 , Jacob Carlson 3 , and Weining Li 5', lookup_str='', metadata={'source': '../../layout-parser-paper.pdf'}, lookup_index=0),\n",
" Document(page_content='Allen Institute for AI shannons@allenai.org', lookup_str='', metadata={'source': '../../layout-parser-paper.pdf'}, lookup_index=0),\n",
" Document(page_content='Brown University ruochen zhang@brown.edu', lookup_str='', metadata={'source': '../../layout-parser-paper.pdf'}, lookup_index=0),\n",
" Document(page_content='Harvard University { melissadell,jacob carlson } @fas.harvard.edu', lookup_str='', metadata={'source': '../../layout-parser-paper.pdf'}, lookup_index=0)]"
]
},
"execution_count": 12,
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"loader = UnstructuredFileLoader(\n",
" \"./example_data/layout-parser-paper.pdf\", mode=\"elements\"\n",
")\n",
"\n",
"docs = loader.load()\n",
"\n",
"docs[5:10]"
"docs[:5]"
]
},
{
@@ -262,38 +352,62 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 2,
"id": "112e5538",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders import UnstructuredFileLoader\n",
"from unstructured.cleaners.core import clean_extra_whitespace"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "b9c5ac8d",
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredFileLoader(\n",
" \"./example_data/layout-parser-paper.pdf\",\n",
" mode=\"elements\",\n",
" post_processors=[clean_extra_whitespace],\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "c44d5def",
"metadata": {},
"outputs": [],
"source": [
"docs = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "b6f27929",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[Document(page_content='2 v 8 4 3 5 1 . 3 0 1 2 : v i X r a', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((16.34, 393.9), (16.34, 560.0), (36.34, 560.0), (36.34, 393.9)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': '89565df026a24279aaea20dc08cedbec', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
" Document(page_content='LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((157.62199999999999, 114.23496279999995), (157.62199999999999, 146.5141628), (457.7358962799999, 146.5141628), (457.7358962799999, 114.23496279999995)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'filetype': 'application/pdf', 'category': 'Title'}),\n",
" Document(page_content='Zejiang Shen1 ((cid:0)), Ruochen Zhang2, Melissa Dell3, Benjamin Charles Germain Lee4, Jacob Carlson3, and Weining Li5', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((134.809, 168.64029940800003), (134.809, 192.2517444), (480.5464199080001, 192.2517444), (480.5464199080001, 168.64029940800003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
" Document(page_content='1 Allen Institute for AI shannons@allenai.org 2 Brown University ruochen zhang@brown.edu 3 Harvard University {melissadell,jacob carlson}@fas.harvard.edu 4 University of Washington bcgl@cs.washington.edu 5 University of Waterloo w422li@uwaterloo.ca', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((207.23000000000002, 202.57205439999996), (207.23000000000002, 311.8195408), (408.12676, 311.8195408), (408.12676, 202.57205439999996)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
" Document(page_content='Abstract. Recent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of im- portant innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applica- tions. The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout de- tection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digiti- zation pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-word use cases. The library is publicly available at https://layout-parser.github.io.', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((162.779, 338.45008160000003), (162.779, 566.8455408), (454.0372021523199, 566.8455408), (454.0372021523199, 338.45008160000003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'links': [{'text': ':// layout - parser . github . io', 'url': 'https://layout-parser.github.io', 'start_index': 1477}], 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'NarrativeText'})]"
"[Document(page_content='LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((157.62199999999999, 114.23496279999995), (157.62199999999999, 146.5141628), (457.7358962799999, 146.5141628), (457.7358962799999, 114.23496279999995)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'filename': 'layout-parser-paper.pdf', 'file_directory': './example_data', 'filetype': 'application/pdf', 'page_number': 1, 'category': 'Title'}),\n",
" Document(page_content='Zejiang Shen1 ((cid:0)), Ruochen Zhang2, Melissa Dell3, Benjamin Charles Germain Lee4, Jacob Carlson3, and Weining Li5', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((134.809, 168.64029940800003), (134.809, 192.2517444), (480.5464199080001, 192.2517444), (480.5464199080001, 168.64029940800003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'filename': 'layout-parser-paper.pdf', 'file_directory': './example_data', 'filetype': 'application/pdf', 'page_number': 1, 'category': 'UncategorizedText'}),\n",
" Document(page_content='1 Allen Institute for AI shannons@allenai.org 2 Brown University ruochen zhang@brown.edu 3 Harvard University {melissadell,jacob carlson}@fas.harvard.edu 4 University of Washington bcgl@cs.washington.edu 5 University of Waterloo w422li@uwaterloo.ca', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((207.23000000000002, 202.57205439999996), (207.23000000000002, 311.8195408), (408.12676, 311.8195408), (408.12676, 202.57205439999996)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'filename': 'layout-parser-paper.pdf', 'file_directory': './example_data', 'filetype': 'application/pdf', 'page_number': 1, 'category': 'UncategorizedText'}),\n",
" Document(page_content='1 2 0 2', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((16.34, 213.36), (16.34, 253.36), (36.34, 253.36), (36.34, 213.36)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'filename': 'layout-parser-paper.pdf', 'file_directory': './example_data', 'filetype': 'application/pdf', 'page_number': 1, 'category': 'UncategorizedText'}),\n",
" Document(page_content='n u J', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((16.34, 258.36), (16.34, 286.14), (36.34, 286.14), (36.34, 258.36)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'filename': 'layout-parser-paper.pdf', 'file_directory': './example_data', 'filetype': 'application/pdf', 'page_number': 1, 'category': 'Title'})]"
]
},
"execution_count": 14,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_community.document_loaders import UnstructuredFileLoader\n",
"from unstructured.cleaners.core import clean_extra_whitespace\n",
"\n",
"loader = UnstructuredFileLoader(\n",
" \"./example_data/layout-parser-paper.pdf\",\n",
" mode=\"elements\",\n",
" post_processors=[clean_extra_whitespace],\n",
")\n",
"\n",
"docs = loader.load()\n",
"\n",
"docs[5:10]"
"docs[:5]"
]
},
{
@@ -306,6 +420,39 @@
"If you want to get up and running with less set up, you can simply run `pip install unstructured` and use `UnstructuredAPIFileLoader` or `UnstructuredAPIFileIOLoader`. That will process your document using the hosted Unstructured API. You can generate a free Unstructured API key [here](https://www.unstructured.io/api-key/). The [Unstructured documentation](https://unstructured-io.github.io/unstructured/) page will have instructions on how to generate an API key once theyre available. Check out the instructions [here](https://github.com/Unstructured-IO/unstructured-api#dizzy-instructions-for-using-the-docker-image) if youd like to self-host the Unstructured API or run it locally."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "b50c70bc",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders import UnstructuredAPIFileLoader"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "12b6d2cf",
"metadata": {},
"outputs": [],
"source": [
"filenames = [\"example_data/fake.docx\", \"example_data/fake-email.eml\"]"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "39a9894d",
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredAPIFileLoader(\n",
" file_path=filenames[0],\n",
" api_key=\"FAKE_API_KEY\",\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 4,
@@ -324,15 +471,6 @@
}
],
"source": [
"from langchain_community.document_loaders import UnstructuredAPIFileLoader\n",
"\n",
"filenames = [\"example_data/fake.docx\", \"example_data/fake-email.eml\"]\n",
"\n",
"loader = UnstructuredAPIFileLoader(\n",
" file_path=filenames[0],\n",
" api_key=\"FAKE_API_KEY\",\n",
")\n",
"\n",
"docs = loader.load()\n",
"docs[0]"
]
@@ -345,6 +483,19 @@
"You can also batch multiple files through the Unstructured API in a single API using `UnstructuredAPIFileLoader`."
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "79a18e7e",
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredAPIFileLoader(\n",
" file_path=filenames,\n",
" api_key=\"FAKE_API_KEY\",\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 6,
@@ -363,11 +514,6 @@
}
],
"source": [
"loader = UnstructuredAPIFileLoader(\n",
" file_path=filenames,\n",
" api_key=\"FAKE_API_KEY\",\n",
")\n",
"\n",
"docs = loader.load()\n",
"docs[0]"
]
@@ -397,7 +543,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.9.0"
}
},
"nbformat": 4,

File diff suppressed because one or more lines are too long

View File

@@ -17,10 +17,29 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders import UnstructuredXMLLoader\n",
"\n",
"from langchain_community.document_loaders import UnstructuredXMLLoader"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "a654e4d9",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Document(page_content='United States\\n\\nWashington, DC\\n\\nJoe Biden\\n\\nBaseball\\n\\nCanada\\n\\nOttawa\\n\\nJustin Trudeau\\n\\nHockey\\n\\nFrance\\n\\nParis\\n\\nEmmanuel Macron\\n\\nSoccer\\n\\nTrinidad & Tobado\\n\\nPort of Spain\\n\\nKeith Rowley\\n\\nTrack & Field', metadata={'source': 'example_data/factbook.xml'})"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"loader = UnstructuredXMLLoader(\n",
" \"./example_data/factbook.xml\",\n",
" \"example_data/factbook.xml\",\n",
")\n",
"docs = loader.load()\n",
"docs[0]"

View File

@@ -19,12 +19,12 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet html2text"
"%pip install --upgrade --quiet html2text"
]
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 4,
"id": "8ca0974b",
"metadata": {},
"outputs": [
@@ -32,8 +32,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
"USER_AGENT environment variable not set, consider setting it to identify your requests.\n",
"Fetching pages: 100%|##########| 2/2 [00:00<00:00, 14.74it/s]\n"
"Fetching pages: 100%|############| 2/2 [00:00<00:00, 10.75it/s]\n"
]
}
],
@@ -47,107 +46,66 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 1,
"id": "ddf2be97",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_transformers import Html2TextTransformer"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "a95a928c",
"metadata": {},
"outputs": [],
"source": [
"urls = [\"https://www.espn.com\", \"https://lilianweng.github.io/posts/2023-06-23-agent/\"]\n",
"html2text = Html2TextTransformer()\n",
"docs_transformed = html2text.transform_documents(docs)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "18ef9fe9",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"## Fantasy\n",
"\n",
" * Football\n",
"\n",
" * Baseball\n",
"\n",
" * Basketball\n",
"\n",
" * Hockey\n",
"\n",
"## ESPN Sites\n",
"\n",
" * ESPN Deportes\n",
"\n",
" * Andscape\n",
"\n",
" * espnW\n",
"\n",
" * ESPNFC\n",
"\n",
" * X Games\n",
"\n",
" * SEC Network\n",
"\n",
"## ESPN Apps\n",
"\n",
" * ESPN\n",
"\n",
" * ESPN Fantasy\n",
"\n",
" * Tournament Challenge\n",
"\n",
"## Follow ESPN\n",
"\n",
" * Facebook\n",
"\n",
" * X/Twitter\n",
"\n",
" * Instagram\n",
"\n",
" * Snapchat\n",
"\n",
" * TikTok\n",
"\n",
" * YouTube\n",
"\n",
"## Fresh updates to our NBA mock draft: Everything we're hearing hours before\n",
"Round 1\n",
"\n",
"With hours until Round 1 begins (8 p.m. ET on ESPN and ABC), ESPN draft\n",
"insiders Jonathan Givony and Jeremy Woo have new intel on lottery picks and\n",
"more.\n",
"\n",
"2hJonathan Givony and Jeremy Woo\n",
"\n",
"Illustration by ESPN\n",
"\n",
"## From No. 1 to 100: Ranking the 2024 NBA draft prospects\n",
"\n",
"Who's No. 1? Where do the Kentucky, Duke and UConn players rank? Here's our\n",
"final Top 100 Big Board.\n",
"\n",
"6hJonathan Givony and Jeremy Woo\n",
"\n",
" * Full draft order: All 58 picks over two rounds\n",
" * Trade tracker: Details for all deals\n",
"\n",
" * Betting buzz: Lakers favorites to draft Bronny\n",
" * Use our NBA draft simu\n",
"ent system, LLM functions as the agent's brain,\n",
"complemented by several key components:\n",
"\n",
" * **Planning**\n",
" * Subgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\n",
" * Reflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\n",
" * **Memory**\n",
" * Short-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\n",
" * Long-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\n",
" * **Tool use**\n",
" * The agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including \n"
]
"data": {
"text/plain": [
"\" * ESPNFC\\n\\n * X Games\\n\\n * SEC Network\\n\\n## ESPN Apps\\n\\n * ESPN\\n\\n * ESPN Fantasy\\n\\n## Follow ESPN\\n\\n * Facebook\\n\\n * Twitter\\n\\n * Instagram\\n\\n * Snapchat\\n\\n * YouTube\\n\\n * The ESPN Daily Podcast\\n\\n2023 FIFA Women's World Cup\\n\\n## Follow live: Canada takes on Nigeria in group stage of Women's World Cup\\n\\n2m\\n\\nEPA/Morgan Hancock\\n\\n## TOP HEADLINES\\n\\n * Snyder fined $60M over findings in investigation\\n * NFL owners approve $6.05B sale of Commanders\\n * Jags assistant comes out as gay in NFL milestone\\n * O's alone atop East after topping slumping Rays\\n * ACC's Phillips: Never condoned hazing at NU\\n\\n * Vikings WR Addison cited for driving 140 mph\\n * 'Taking his time': Patient QB Rodgers wows Jets\\n * Reyna got U.S. assurances after Berhalter rehire\\n * NFL Future Power Rankings\\n\\n## USWNT AT THE WORLD CUP\\n\\n### USA VS. VIETNAM: 9 P.M. ET FRIDAY\\n\\n## How do you defend against Alex Morgan? Former opponents sound off\\n\\nThe U.S. forward is unstoppable at this level, scoring 121 goals and adding 49\""
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_community.document_transformers import Html2TextTransformer\n",
"\n",
"urls = [\"https://www.espn.com\", \"https://lilianweng.github.io/posts/2023-06-23-agent/\"]\n",
"html2text = Html2TextTransformer()\n",
"docs_transformed = html2text.transform_documents(docs)\n",
"\n",
"print(docs_transformed[0].page_content[1000:2000])\n",
"\n",
"print(docs_transformed[1].page_content[1000:2000])"
"docs_transformed[0].page_content[1000:2000]"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "6045d660",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\"t's brain,\\ncomplemented by several key components:\\n\\n * **Planning**\\n * Subgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\\n * Reflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\\n * **Memory**\\n * Short-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\n * Long-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n * **Tool use**\\n * The agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution c\""
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"docs_transformed[1].page_content[1000:2000]"
]
}
],
@@ -167,7 +125,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.10.12"
}
},
"nbformat": 4,

View File

@@ -2147,32 +2147,6 @@
"llm(\"Tell me one joke\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## SingleStoreDB Semantic Cache\n",
"You can use [SingleStoreDB](https://python.langchain.com/docs/integrations/vectorstores/singlestoredb/) as a semantic cache to cache prompts and responses."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d82f1bdc",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.cache import SingleStoreDBSemanticCache\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"set_llm_cache(\n",
" SingleStoreDBSemanticCache(\n",
" embedding=OpenAIEmbeddings(),\n",
" host=\"root:pass@localhost:3306/db\",\n",
" )\n",
")"
]
},
{
"cell_type": "markdown",
"id": "ae1f5e1c-085e-4998-9f2d-b5867d2c3d5b",
@@ -2204,7 +2178,7 @@
"source": [
"**Cache** classes are implemented by inheriting the [BaseCache](https://api.python.langchain.com/en/latest/caches/langchain_core.caches.BaseCache.html) class.\n",
"\n",
"This table lists all 21 derived classes with links to the API Reference.\n",
"This table lists all 20 derived classes with links to the API Reference.\n",
"\n",
"\n",
"| Namespace 🔻 | Class |\n",
@@ -2221,7 +2195,6 @@
"| langchain_community.cache | [MomentoCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.MomentoCache.html) |\n",
"| langchain_community.cache | [OpenSearchSemanticCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.OpenSearchSemanticCache.html) |\n",
"| langchain_community.cache | [RedisSemanticCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.RedisSemanticCache.html) |\n",
"| langchain_community.cache | [SingleStoreDBSemanticCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.SingleStoreDBSemanticCache.html) |\n",
"| langchain_community.cache | [SQLAlchemyCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.SQLAlchemyCache.html) |\n",
"| langchain_community.cache | [SQLAlchemyMd5Cache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.SQLAlchemyMd5Cache.html) |\n",
"| langchain_community.cache | [UpstashRedisCache](https://api.python.langchain.com/en/latest/cache/langchain_community.cache.UpstashRedisCache.html) |\n",

View File

@@ -17,9 +17,7 @@
"source": [
"# AI21LLM\n",
"\n",
"This example goes over how to use LangChain to interact with `AI21` Jurassic models. To use the Jamba model, use the [ChatAI21 object](https://python.langchain.com/v0.2/docs/integrations/chat/ai21/) instead.\n",
"\n",
"[See a full list of AI21 models and tools on LangChain.](https://pypi.org/project/langchain-ai21/)\n",
"This example goes over how to use LangChain to interact with `AI21` models.\n",
"\n",
"## Installation"
]

View File

@@ -34,7 +34,7 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain_aws"
"%pip install --upgrade --quiet boto3"
]
},
{
@@ -45,9 +45,9 @@
},
"outputs": [],
"source": [
"from langchain_aws import BedrockLLM\n",
"from langchain_community.llms import Bedrock\n",
"\n",
"llm = BedrockLLM(\n",
"llm = Bedrock(\n",
" credentials_profile_name=\"bedrock-admin\", model_id=\"amazon.titan-text-express-v1\"\n",
")"
]
@@ -65,7 +65,7 @@
"metadata": {},
"outputs": [],
"source": [
"custom_llm = BedrockLLM(\n",
"custom_llm = Bedrock(\n",
" credentials_profile_name=\"bedrock-admin\",\n",
" provider=\"cohere\",\n",
" model_id=\"<Custom model ARN>\", # ARN like 'arn:aws:bedrock:...' obtained via provisioning the custom model\n",
@@ -108,7 +108,7 @@
"\n",
"\n",
"# Guardrails for Amazon Bedrock with trace\n",
"llm = BedrockLLM(\n",
"llm = Bedrock(\n",
" credentials_profile_name=\"bedrock-admin\",\n",
" model_id=\"<Model_ID>\",\n",
" model_kwargs={},\n",
@@ -134,7 +134,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.11.7"
}
},
"nbformat": 4,

File diff suppressed because one or more lines are too long

View File

@@ -14,13 +14,21 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"metadata": {
"tags": []
},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"%pip install --upgrade --quiet langchain-community gpt4all"
"%pip install --upgrade --quiet gpt4all > /dev/null"
]
},
{
@@ -39,7 +47,9 @@
},
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
"from langchain_community.llms import GPT4All\n",
"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
"from langchain_core.prompts import PromptTemplate"
]
},
@@ -82,71 +92,7 @@
"\n",
"For more info, visit https://github.com/nomic-ai/gpt4all.\n",
"\n",
"---\n",
"\n",
"This integration does not yet support streaming in chunks via the [`.stream()`](https://python.langchain.com/v0.2/docs/how_to/streaming/) method. The below example uses a callback handler with `streaming=True`:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"local_path = (\n",
" \"./models/Meta-Llama-3-8B-Instruct.Q4_0.gguf\" # replace with your local file path\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Token: Justin\n",
"Token: Bieber\n",
"Token: was\n",
"Token: born\n",
"Token: on\n",
"Token: March\n",
"Token: \n",
"Token: 1\n",
"Token: ,\n",
"Token: \n"
]
}
],
"source": [
"from langchain_core.callbacks import BaseCallbackHandler\n",
"\n",
"count = 0\n",
"\n",
"\n",
"class MyCustomHandler(BaseCallbackHandler):\n",
" def on_llm_new_token(self, token: str, **kwargs) -> None:\n",
" global count\n",
" if count < 10:\n",
" print(f\"Token: {token}\")\n",
" count += 1\n",
"\n",
"\n",
"# Verbose is required to pass to the callback manager\n",
"llm = GPT4All(model=local_path, callbacks=[MyCustomHandler()], streaming=True)\n",
"\n",
"# If you want to use a custom model add the backend parameter\n",
"# Check https://docs.gpt4all.io/gpt4all_python.html for supported backends\n",
"# llm = GPT4All(model=local_path, backend=\"gptj\", callbacks=callbacks, streaming=True)\n",
"\n",
"chain = prompt | llm\n",
"\n",
"question = \"What NFL team won the Super Bowl in the year Justin Bieber was born?\"\n",
"\n",
"# Streamed tokens will be logged/aggregated via the passed callback\n",
"res = chain.invoke({\"question\": question})"
"---"
]
},
{
@@ -154,7 +100,56 @@
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
"source": [
"local_path = (\n",
" \"./models/ggml-gpt4all-l13b-snoozy.bin\" # replace with your desired local file path\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Callbacks support token-wise streaming\n",
"callbacks = [StreamingStdOutCallbackHandler()]\n",
"\n",
"# Verbose is required to pass to the callback manager\n",
"llm = GPT4All(model=local_path, callbacks=callbacks, verbose=True)\n",
"\n",
"# If you want to use a custom model add the backend parameter\n",
"# Check https://docs.gpt4all.io/gpt4all_python.html for supported backends\n",
"llm = GPT4All(model=local_path, backend=\"gptj\", callbacks=callbacks, verbose=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"llm_chain = LLMChain(prompt=prompt, llm=llm)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"question = \"What NFL team won the Super Bowl in the year Justin Bieber was born?\"\n",
"\n",
"llm_chain.run(question)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"Justin Bieber was born on March 1, 1994. In 1994, The Cowboys won Super Bowl XXVIII."
]
}
],
"metadata": {

View File

@@ -143,25 +143,6 @@
"print(chain.invoke({\"question\": question}))"
]
},
{
"cell_type": "markdown",
"id": "5141dc4d",
"metadata": {},
"source": [
"Streaming repsonse."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f1819250-2db9-4143-b88a-12e92d4e2386",
"metadata": {},
"outputs": [],
"source": [
"for chunk in chain.stream(question):\n",
" print(chunk, end=\"\", flush=True)"
]
},
{
"cell_type": "markdown",
"id": "dbbc3a37",

View File

@@ -245,7 +245,7 @@
"source": [
"### Streaming\n",
"\n",
"You can use `stream` method to get a streaming of LLM output, "
"To get streaming of LLM output, you can create a Huggingface `TextIteratorStreamer` for `_forward_params`."
]
},
{
@@ -255,11 +255,24 @@
"metadata": {},
"outputs": [],
"source": [
"generation_config = {\"skip_prompt\": True, \"pipeline_kwargs\": {\"max_new_tokens\": 100}}\n",
"chain = prompt | ov_llm.bind(**generation_config)\n",
"from threading import Thread\n",
"\n",
"for chunk in chain.stream(question):\n",
" print(chunk, end=\"\", flush=True)"
"from transformers import TextIteratorStreamer\n",
"\n",
"streamer = TextIteratorStreamer(\n",
" ov_llm.pipeline.tokenizer,\n",
" timeout=30.0,\n",
" skip_prompt=True,\n",
" skip_special_tokens=True,\n",
")\n",
"pipeline_kwargs = {\"pipeline_kwargs\": {\"streamer\": streamer, \"max_new_tokens\": 100}}\n",
"chain = prompt | ov_llm.bind(**pipeline_kwargs)\n",
"\n",
"t1 = Thread(target=chain.invoke, args=({\"question\": question},))\n",
"t1.start()\n",
"\n",
"for new_text in streamer:\n",
" print(new_text, end=\"\", flush=True)"
]
},
{

View File

@@ -18,6 +18,16 @@ pip install langchain-community boto3
### Bedrock Chat
See a [usage example](/docs/integrations/chat/bedrock).
```python
from langchain_aws import ChatBedrock
```
## LLMs
### Bedrock
>[Amazon Bedrock](https://aws.amazon.com/bedrock/) is a fully managed service that offers a choice of
> high-performing foundation models (FMs) from leading AI companies like `AI21 Labs`, `Anthropic`, `Cohere`,
> `Meta`, `Stability AI`, and `Amazon` via a single API, along with a broad set of capabilities you need to
@@ -28,15 +38,6 @@ pip install langchain-community boto3
> serverless, you don't have to manage any infrastructure, and you can securely integrate and deploy
> generative AI capabilities into your applications using the AWS services you are already familiar with.
See a [usage example](/docs/integrations/chat/bedrock).
```python
from langchain_aws import ChatBedrock
```
## LLMs
### Bedrock
See a [usage example](/docs/integrations/llms/bedrock).
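A minimal sketch of the LLM path, assuming the same `langchain_aws` package shown above; the credentials profile name and model ID are placeholders you would replace with your own.

```python
from langchain_aws import BedrockLLM

# Profile name and model ID below are placeholders.
llm = BedrockLLM(
    credentials_profile_name="bedrock-admin",
    model_id="amazon.titan-text-express-v1",
)
print(llm.invoke("Tell me a joke about clouds."))
```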

View File

@@ -223,15 +223,9 @@ See a [usage example](/docs/integrations/document_loaders/microsoft_onenote).
from langchain_community.document_loaders.onenote import OneNoteLoader
```
## AI Agent Memory System
## Vector stores
An [AI agent](https://learn.microsoft.com/en-us/azure/cosmos-db/ai-agents) needs robust memory systems that support multi-modality, offer strong operational performance, and enable agent memory sharing as well as separation.
AI agents can rely on Azure Cosmos DB as a unified [memory system](https://learn.microsoft.com/en-us/azure/cosmos-db/ai-agents#memory-can-make-or-break-agents) solution, enjoying speed, scale, and simplicity. This service successfully [enabled OpenAI's ChatGPT service](https://www.youtube.com/watch?v=6IIUtEFKJec&t) to scale dynamically with high reliability and low maintenance. Powered by an atom-record-sequence engine, it is the world's first globally distributed [NoSQL](https://learn.microsoft.com/en-us/azure/cosmos-db/distributed-nosql), [relational](https://learn.microsoft.com/en-us/azure/cosmos-db/distributed-relational), and [vector database](https://learn.microsoft.com/en-us/azure/cosmos-db/vector-database) service that offers a serverless mode.
Below are two available Azure Cosmos DB APIs that can provide vector store functionalities.
### Azure Cosmos DB for MongoDB (vCore)
### Azure Cosmos DB MongoDB vCore
>[Azure Cosmos DB for MongoDB vCore](https://learn.microsoft.com/en-us/azure/cosmos-db/mongodb/vcore/) makes it easy to create a database with full native MongoDB support.
> You can apply your MongoDB experience and continue to use your favorite MongoDB drivers, SDKs, and tools by pointing your application to the API for MongoDB vCore account's connection string.
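A brief, hedged sketch of the vCore API used as a LangChain vector store; the connection string, namespace, and embedding model are placeholders, and a vector index must already exist (or be created via the store's index helpers) before similarity search returns results.

```python
from langchain_community.vectorstores.azure_cosmos_db import AzureCosmosDBVectorSearch
from langchain_openai import OpenAIEmbeddings

# Placeholders: replace with your own connection string and namespace.
vector_store = AzureCosmosDBVectorSearch.from_connection_string(
    connection_string="mongodb+srv://<user>:<password>@<cluster>.mongodb.net/?retryWrites=false",
    namespace="db_name.collection_name",
    embedding=OpenAIEmbeddings(),
)
docs = vector_store.similarity_search("What topics are covered?", k=3)
```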

View File

@@ -7,7 +7,7 @@ This page covers how to use the `GPT4All` wrapper within LangChain. The tutorial
- Install the Python package with `pip install gpt4all`
- Download a [GPT4All model](https://gpt4all.io/index.html) and place it in your desired directory
In this example, we are using `mistral-7b-openorca.Q4_0.gguf`:
In this example, We are using `mistral-7b-openorca.Q4_0.gguf`(Best overall fast chat model):
```bash
mkdir models
@@ -30,7 +30,7 @@ model = GPT4All(model="./models/mistral-7b-openorca.Q4_0.gguf", n_threads=8)
response = model.invoke("Once upon a time, ")
```
You can also customize the generation parameters, such as `n_predict`, `temp`, `top_p`, `top_k`, and others.
You can also customize the generation parameters, such as n_predict, temp, top_p, top_k, and others.
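A short sketch of parameter customization, assuming the same local model file as above; the parameter values are illustrative placeholders, not recommendations.

```python
from langchain_community.llms import GPT4All

# Generation parameters are plain keyword arguments on the wrapper;
# the values below are illustrative only.
model = GPT4All(
    model="./models/mistral-7b-openorca.Q4_0.gguf",
    n_threads=8,
    n_predict=256,
    temp=0.7,
    top_p=0.4,
    top_k=40,
)
print(model.invoke("Once upon a time, "))
```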
To stream the model's predictions, add in a CallbackManager.
@@ -45,11 +45,11 @@ callbacks = [StreamingStdOutCallbackHandler()]
model = GPT4All(model="./models/mistral-7b-openorca.Q4_0.gguf", n_threads=8)
# Generate text. Tokens are streamed through the callback manager.
model.invoke("Once upon a time, ", callbacks=callbacks)
model("Once upon a time, ", callbacks=callbacks)
```
## Model File
You can download model files from the GPT4All client. You can download the client from the [GPT4All](https://gpt4all.io/index.html) website.
You can find links to model file downloads in the [https://gpt4all.io/](https://gpt4all.io/index.html).
For a more detailed walkthrough of this, see [this notebook](/docs/integrations/llms/gpt4all)

View File

@@ -28,16 +28,6 @@ import os
os.environ["WATSONX_APIKEY"] = "your IBM watsonx.ai api key"
```
## Chat Model
### ChatWatsonx
See a [usage example](/docs/integrations/chat/ibm_watsonx).
```python
from langchain_ibm import ChatWatsonx
```
## LLMs
### WatsonxLLM

View File

@@ -7,19 +7,29 @@
"source": [
"# MLflow\n",
"\n",
">[MLflow](https://mlflow.org/) is a versatile, open-source platform for managing workflows and artifacts across the machine learning lifecycle. It has built-in integrations with many popular ML libraries, but can be used with any library, algorithm, or deployment tool. It is designed to be extensible, so you can write plugins to support new workflows, libraries, and tools.\n",
">[MLflow](https://www.mlflow.org/docs/latest/what-is-mlflow) is a versatile, expandable, open-source platform for managing workflows and artifacts across the machine learning lifecycle. It has built-in integrations with many popular ML libraries, but can be used with any library, algorithm, or deployment tool. It is designed to be extensible, so you can write plugins to support new workflows, libraries, and tools.\n",
"\n",
"In the context of LangChain integration, MLflow provides the following capabilities:\n",
"\n",
"- **Experiment Tracking**: MLflow tracks and stores artifacts from your LangChain experiments, including models, code, prompts, metrics, and more.\n",
"- **Dependency Management**: MLflow automatically records model dependencies, ensuring consistency between development and production environments.\n",
"- **Model Evaluation** MLflow offers native capabilities for evaluating LangChain applications.\n",
"- **Tracing**: MLflow allows you to visually trace data flows through your LangChain chain, agent, retriever, or other components.\n",
"\n",
"\n",
"**Note**: The tracing capability is only available in MLflow versions 2.14.0 and later.\n",
"\n",
"This notebook demonstrates how to track your LangChain experiments using MLflow. For more information about this feature and to explore tutorials and examples of using LangChain with MLflow, please refer to the [MLflow documentation for LangChain integration](https://mlflow.org/docs/latest/llms/langchain/index.html)."
"This notebook goes over how to track your LangChain experiments into your `MLflow Server`"
]
},
{
"cell_type": "markdown",
"id": "ea73efae-7182-4a89-a492-c865b1fcf981",
"metadata": {},
"source": [
"## External examples"
]
},
{
"cell_type": "markdown",
"id": "97361a84-4e8f-45ba-b291-814cf73cd8f2",
"metadata": {},
"source": [
"`MLflow` provides [several examples](https://github.com/mlflow/mlflow/tree/master/examples/langchain) for the `LangChain` integration:\n",
"- [simple_chain](https://github.com/mlflow/mlflow/blob/master/examples/langchain/simple_chain.py)\n",
"- [simple_agent](https://github.com/mlflow/mlflow/blob/master/examples/langchain/simple_agent.py)\n",
"- [retriever_chain](https://github.com/mlflow/mlflow/blob/master/examples/langchain/retriever_chain.py)\n",
"- [retrieval_qa_chain](https://github.com/mlflow/mlflow/blob/master/examples/langchain/retrieval_qa_chain.py)\n"
]
},
{
@@ -27,37 +37,7 @@
"id": "e0cbd74b-1542-45a4-a72b-b2eedeffd2e0",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"Install MLflow Python package:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7fb27b941602401d91542211134fc71a",
"metadata": {},
"outputs": [],
"source": [
"%pip install google-search-results num"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "42406548",
"metadata": {},
"outputs": [],
"source": [
"%pip install mlflow -qU"
]
},
{
"cell_type": "markdown",
"id": "8e626bb4",
"metadata": {},
"source": [
"This example utilizes the OpenAI LLM. Feel free to skip the command below and proceed with a different LLM if desired."
"## Example"
]
},
{
@@ -67,535 +47,142 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install langchain-openai -qU"
"%pip install --upgrade --quiet azureml-mlflow\n",
"%pip install --upgrade --quiet pandas\n",
"%pip install --upgrade --quiet textstat\n",
"%pip install --upgrade --quiet spacy\n",
"%pip install --upgrade --quiet langchain-openai\n",
"%pip install --upgrade --quiet google-search-results\n",
"!python -m spacy download en_core_web_sm"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "7e87b21d",
"execution_count": null,
"id": "bf8e1f5c",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"# Set MLflow tracking URI if you have MLflow Tracking Server running\n",
"os.environ[\"MLFLOW_TRACKING_URI\"] = \"\"\n",
"os.environ[\"OPENAI_API_KEY\"] = \"\""
]
},
{
"cell_type": "markdown",
"id": "84616d96",
"metadata": {},
"source": [
"To begin, let's create a dedicated MLflow experiment in order track our model and artifacts. While you can opt to skip this step and use the default experiment, we strongly recommend organizing your runs and artifacts into separate experiments to avoid clutter and maintain a clean, structured workflow."
"os.environ[\"OPENAI_API_KEY\"] = \"\"\n",
"os.environ[\"SERPAPI_API_KEY\"] = \"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "155d2a6f",
"metadata": {},
"outputs": [],
"source": [
"import mlflow\n",
"\n",
"mlflow.set_experiment(\"LangChain MLflow Integration\")"
]
},
{
"cell_type": "markdown",
"id": "48accc76",
"metadata": {},
"source": [
"## Overview\n",
"\n",
"Integrate MLflow with your LangChain Application using one of the following methods:\n",
"\n",
"1. **Autologging**: Enable seamless tracking with the `mlflow.langchain.autolog()` command, our recommended first option for leveraging the LangChain MLflow integration.\n",
"2. **Manual Logging**: Use MLflow APIs to log LangChain chains and agents, providing fine-grained control over what to track in your experiment.\n",
"3. **Custom Callbacks**: Pass MLflow callbacks manually when invoking chains, allowing for semi-automated customization of your workload, such as tracking specific invocations."
]
},
{
"cell_type": "markdown",
"id": "c3f10055",
"metadata": {},
"source": [
"## Scenario 1: MLFlow Autologging"
]
},
{
"cell_type": "markdown",
"id": "71118a27",
"metadata": {},
"source": [
"To get started with autologging, simply call `mlflow.langchain.autolog()`. In this example, we set the `log_models` parameter to `True`, which allows the chain definition and its dependency libraries to be recorded as an MLflow model, providing a comprehensive tracking experience."
]
},
{
"cell_type": "code",
"execution_count": 39,
"id": "5b08145f",
"metadata": {},
"outputs": [],
"source": [
"import mlflow\n",
"\n",
"mlflow.langchain.autolog(\n",
" # These are optional configurations to control what information should be logged automatically (default: False)\n",
" # For the full list of the arguments, refer to https://mlflow.org/docs/latest/llms/langchain/autologging.html#id1\n",
" log_models=True,\n",
" log_input_examples=True,\n",
" log_model_signatures=True,\n",
")"
]
},
{
"cell_type": "markdown",
"id": "f0570c18",
"metadata": {},
"source": [
"### Define a Chain"
]
},
{
"cell_type": "code",
"execution_count": 40,
"id": "1b2627ef",
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model_name=\"gpt-4o\", temperature=0)\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\n",
" \"system\",\n",
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
" ),\n",
" (\"human\", \"{input}\"),\n",
" ]\n",
")\n",
"parser = StrOutputParser()\n",
"\n",
"chain = prompt | llm | parser"
]
},
{
"cell_type": "markdown",
"id": "a5b38bae",
"metadata": {},
"source": [
"### Invoke the Chain\n",
"\n",
"Note that this step may take a few seconds longer than usual, as MLflow runs several background tasks in the background to log models, traces, and artifacts to the tracking server."
]
},
{
"cell_type": "code",
"execution_count": 41,
"id": "a1df4bc8",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Ich liebe das Programmieren.'"
]
},
"execution_count": 41,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"test_input = {\n",
" \"input_language\": \"English\",\n",
" \"output_language\": \"German\",\n",
" \"input\": \"I love programming.\",\n",
"}\n",
"\n",
"chain.invoke(test_input)"
]
},
{
"cell_type": "markdown",
"id": "5173cdd4",
"metadata": {},
"source": [
"Take a moment to explore the MLflow Tracking UI, where you can gain a deeper understanding of what information are being logged.\n",
"* **Traces** - Navigate to the \"Traces\" tab in the experiment and click the request ID link of the first row. The displayed trace tree visualizes the call stack of your chain invocation, providing you with a deep insight into how each component is executed within the chain.\n",
"* **MLflow Model** - As we set `log_model=True`, MLflow automatically creates an MLflow Run to track your chain definition. Navigate to the newest Run page and open the \"Artifacts\" tab, which lists file artifacts logged as an MLflow Model, including dependencies, input examples, model signatures, and more.\n"
]
},
{
"cell_type": "markdown",
"id": "36179573",
"metadata": {},
"source": [
"### Invoke the Logged Chain\n",
"\n",
"Next, let's load the model back and verify that we can reproduce the same prediction, ensuring consistency and reliability.\n",
"\n",
"There are two ways to load the model\n",
"1. `mlflow.langchain.load_model(MODEL_URI)` - This loads the model as the original LangChain object.\n",
"2. `mlflow.pyfunc.load_model(MODEL_URI)` - This loads the model within the `PythonModel` wrapper and encapsulates the prediction logic with the `predict()` API, which contains additional logic such as schema enforcement."
]
},
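{
"cell_type": "markdown",
"id": "3f7a1c20",
"metadata": {},
"source": [
"Both loading examples below use a Run ID. If you prefer not to copy it from the MLflow UI, you can also look it up programmatically. The cell below is a minimal sketch, assuming the autologged run is the most recent run created in this notebook session; `last_run` and `model_uri` are illustrative names."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4d8b2e31",
"metadata": {},
"outputs": [],
"source": [
"# Fetch the most recent run created in this session (includes autologged runs)\n",
"last_run = mlflow.last_active_run()\n",
"\n",
"# Build the model URI for the chain logged under the \"model\" artifact path\n",
"model_uri = f\"runs:/{last_run.info.run_id}/model\" if last_run else None\n",
"print(model_uri)"
]
},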
{
"cell_type": "code",
"execution_count": 42,
"id": "a8e39d72",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Ich liebe Programmieren.'"
]
},
"execution_count": 42,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Replace YOUR_RUN_ID with the Run ID displayed on the MLflow UI\n",
"loaded_model = mlflow.langchain.load_model(\"runs:/{YOUR_RUN_ID}/model\")\n",
"loaded_model.invoke(test_input)"
]
},
{
"cell_type": "code",
"execution_count": 57,
"id": "9619356d",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['Ich liebe das Programmieren.']"
]
},
"execution_count": 57,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"pyfunc_model = mlflow.pyfunc.load_model(\"runs:/{YOUR_RUN_ID}/model\")\n",
"pyfunc_model.predict(test_input)"
]
},
{
"cell_type": "markdown",
"id": "eb23a78c",
"metadata": {},
"source": [
"### Configure Autologging\n",
"\n",
"The `mlflow.langchain.autolog()` function offers several parameters that allow for fine-grained control over the artifacts logged to MLflow. For a comprehensive list of available configurations, please refer to the latest [MLflow LangChain Autologging Documentation](https://mlflow.org/docs/latest/llms/langchain/autologging.html)."
]
},
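{
"cell_type": "markdown",
"id": "5e9c3f42",
"metadata": {},
"source": [
"For example, if you only want traces and prefer not to log the chain as an MLflow Model, a minimal sketch (assuming the parameter names documented for your MLflow version) looks like this:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6fad4053",
"metadata": {},
"outputs": [],
"source": [
"# Keep autologging enabled but skip logging the chain as an MLflow Model\n",
"mlflow.langchain.autolog(log_models=False)\n",
"\n",
"# Autologging can also be turned off entirely once you no longer need it:\n",
"# mlflow.langchain.autolog(disable=True)"
]
},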
{
"cell_type": "markdown",
"id": "1bf6bb02",
"metadata": {},
"source": [
"## Scenario 2: Manually Logging an Agent from Code"
]
},
{
"cell_type": "markdown",
"id": "6e447a02",
"metadata": {},
"source": [
"\n",
"#### Prerequisites\n",
"\n",
"This example uses `SerpAPI`, a search engine API, as a tool for the agent to retrieve Google Search results. LangChain is natively integrated with `SerpAPI`, allowing you to configure the tool for your agent with just one line of code.\n",
"\n",
"To get started:\n",
"\n",
"* Install the required Python package via pip: `pip install google-search-results numexpr`.\n",
"* Create an account at [SerpAPI's Official Website](https://serpapi.com/) and retrieve an API key.\n",
"* Set the API key in the environment variable: `os.environ[\"SERPAPI_API_KEY\"] = \"YOUR_API_KEY\"`\n"
]
},
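{
"cell_type": "markdown",
"id": "70be5164",
"metadata": {},
"source": [
"For convenience, the next cell sets the key for this notebook session. `YOUR_API_KEY` is a placeholder that you need to replace with your own key."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "81cf6275",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"# Replace the placeholder with your actual SerpAPI key before running this cell\n",
"os.environ[\"SERPAPI_API_KEY\"] = \"YOUR_API_KEY\""
]
},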
{
"cell_type": "markdown",
"id": "d0c914e3",
"metadata": {},
"source": [
"### Define an Agent\n",
"\n",
"In this example, we will log the agent definition **as code**, rather than directly feeding the Python object and saving it in a serialized format. This approach offers several benefits:\n",
"\n",
"1. **No serialization required**: By saving the model as code, we avoid the need for serialization, which can be problematic when working with components that don't natively support it. This approach also eliminates the risk of incompatibility issues when deserializing the model in a different environment.\n",
"2. **Better transparency**: By inspecting the saved code file, you can gain valuable insights into what the model does. This is in contrast to serialized formats like pickle, where the model's behavior remains opaque until it's loaded back, potentially exposing security risks such as remote code execution.\n"
]
},
{
"cell_type": "markdown",
"id": "9190a609",
"metadata": {},
"source": [
"First, create a separate `.py` file that defines the agent instance.\n",
"\n",
"In the interest of time, you can run the following cell to generate a Python file `agent.py`, which contains the agent definition code. In actual dev scenario, you would define it in another notebook or hand-crafted python script."
]
},
{
"cell_type": "code",
"execution_count": 64,
"id": "62b20e17",
"metadata": {},
"outputs": [],
"source": [
"script_content = \"\"\"\n",
"from langchain.agents import AgentType, initialize_agent, load_tools\n",
"from langchain_openai import ChatOpenAI\n",
"import mlflow\n",
"\n",
"llm = ChatOpenAI(model_name=\"gpt-4o\", temperature=0)\n",
"tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)\n",
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)\n",
"\n",
"# IMPORTANT: call set_model() to register the instance to be logged.\n",
"mlflow.models.set_model(agent)\n",
"\"\"\"\n",
"\n",
"with open(\"agent.py\", \"w\") as f:\n",
" f.write(script_content)"
]
},
{
"cell_type": "markdown",
"id": "82a21f06",
"metadata": {},
"source": [
"### Log the Agent\n",
"\n",
"Return to the original notebook and run the following cell to log the agent you've defined in the `agent.py` file.\n"
]
},
{
"cell_type": "code",
"execution_count": 51,
"id": "cd5b8bcc",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The agent is successfully logged to MLflow!\n"
]
}
],
"source": [
"question = \"How long would it take to drive to the Moon with F1 racing cars?\"\n",
"\n",
"with mlflow.start_run(run_name=\"search-math-agent\") as run:\n",
" info = mlflow.langchain.log_model(\n",
" lc_model=\"agent.py\", # Specify the relative code path to the agent definition\n",
" artifact_path=\"model\",\n",
" input_example=question,\n",
" )\n",
"\n",
"print(\"The agent is successfully logged to MLflow!\")"
]
},
{
"cell_type": "markdown",
"id": "b4687052",
"metadata": {},
"source": [
"Now, open the MLflow UI and navigate to the \"Artifacts\" tab in the Run detail page. You should see that the `agent.py` file has been successfully logged, along with other model artifacts, such as dependencies, input examples, and more."
]
},
{
"cell_type": "markdown",
"id": "9011db62",
"metadata": {},
"source": [
"### Invoke the Logged Agent\n",
"\n",
"Now load the agent back and invoke it. There are two ways to load the model"
]
},
{
"cell_type": "code",
"execution_count": 53,
"id": "b634b69d",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Downloading artifacts: 100%|██████████| 10/10 [00:00<00:00, 331.57it/s]\n"
]
},
{
"data": {
"text/plain": [
"['It would take approximately 1194.5 hours to drive to the Moon with an F1 racing car.']"
]
},
"execution_count": 53,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Let's turn on the autologging with default configuration, so we can see the trace for the agent invocation.\n",
"mlflow.langchain.autolog()\n",
"\n",
"# Load the model back\n",
"agent = mlflow.pyfunc.load_model(info.model_uri)\n",
"\n",
"# Invoke\n",
"agent.predict(question)"
]
},
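{
"cell_type": "markdown",
"id": "92d07386",
"metadata": {},
"source": [
"Alternatively, you could load the agent back as the native LangChain object with `mlflow.langchain.load_model()` and call it directly. The sketch below assumes the agent's dependencies (including the SerpAPI key) are available in the current environment; its output is not shown here."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a3e18497",
"metadata": {},
"outputs": [],
"source": [
"# Load the logged agent back as the original LangChain object\n",
"native_agent = mlflow.langchain.load_model(info.model_uri)\n",
"\n",
"# The agent executor expects a dict input with the \"input\" key\n",
"native_agent.invoke({\"input\": question})"
]
},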
{
"cell_type": "markdown",
"id": "30bf6133",
"metadata": {},
"source": [
"Navigate to the **\"Traces\"** tab in the experiment and click the request ID link of the first row. The trace visualizes how the agent operate multiple tasks within the single prediction call:\n",
"1. Determine what subtasks are required to answer the questions.\n",
"2. Search for the speed of an F1 racing car.\n",
"3. Search for the distance from Earth to Moon.\n",
"4. Compute the division using LLM."
]
},
{
"cell_type": "markdown",
"id": "cbd10f34",
"metadata": {},
"source": [
"## Scenario 3. Using MLflow Callbacks\n",
"\n",
"**MLflow Callbacks** provide a semi-automated way to track your LangChain application in MLflow. There are two primary callbacks available:\n",
"\n",
"1. **`MlflowLangchainTracer`:** Primarily used for generating traces, available in `mlflow >= 2.14.0`.\n",
"2. **`MLflowCallbackHandler`:** Logs metrics and artifacts to the MLflow tracking server."
]
},
{
"cell_type": "markdown",
"id": "d013d309",
"metadata": {},
"source": [
"### MlflowLangchainTracer\n",
"\n",
"When the chain or agent is invoked with the `MlflowLangchainTracer` callback, MLflow will automatically generate a trace for the call stack and log it to the MLflow tracking server. The outcome is exactly same as `mlflow.langchain.autolog()`, but this is particularly useful when you want to only trace specific invocation. Autologging is applied to all invocation in the same notebook/script, on the other hand."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "46d48044",
"metadata": {},
"outputs": [],
"source": [
"from mlflow.langchain.langchain_tracer import MlflowLangchainTracer\n",
"\n",
"mlflow_tracer = MlflowLangchainTracer()\n",
"\n",
"# This call generates a trace\n",
"chain.invoke(test_input, config={\"callbacks\": [mlflow_tracer]})\n",
"\n",
"# This call does not generate a trace\n",
"chain.invoke(test_input)"
]
},
{
"cell_type": "markdown",
"id": "acb6692c",
"metadata": {},
"source": [
"#### Where to Pass the Callback\n",
" LangChain supports two ways of passing callback instances: (1) Request time callbacks - pass them to the `invoke` method or bind with `with_config()` (2) Constructor callbacks - set them in the chain constructor. When using the `MlflowLangchainTracer` as a callback, you **must use request time callbacks**. Setting it in the constructor instead will only apply the callback to the top-level object, preventing it from being propagated to child components, resulting in incomplete traces. For more information on this behavior, please refer to [Callbacks Documentation](https://python.langchain.com/v0.2/docs/concepts/#callbacks) for more details.\n",
"\n",
"```python\n",
"# OK\n",
"chain.invoke(test_input, config={\"callbacks\": [mlflow_tracer]})\n",
"chain.with_config(callbacks=[mlflow_tracer])\n",
"# NG\n",
"chain = TheNameOfSomeChain(callbacks=[mlflow_tracer])\n",
"```"
]
},
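{
"cell_type": "markdown",
"id": "b4f295a8",
"metadata": {},
"source": [
"If you invoke the same chain repeatedly, you can also bind the tracer once with `with_config()` so that every call on the bound runnable is traced. This is a small sketch reusing the `chain`, `test_input`, and `mlflow_tracer` objects defined above."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c503a6b9",
"metadata": {},
"outputs": [],
"source": [
"# Bind the tracer once; every invocation of the bound runnable is traced\n",
"traced_chain = chain.with_config(callbacks=[mlflow_tracer])\n",
"\n",
"# This call generates a trace without passing the callback explicitly\n",
"traced_chain.invoke(test_input)"
]
},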
{
"cell_type": "markdown",
"id": "d6a60ba7",
"metadata": {},
"source": [
"#### Supported Methods\n",
"\n",
"`MlflowLangchainTracer` supports the following invocation methods from the [Runnable Interfaces](https://python.langchain.com/v0.1/docs/expression_language/interface/).\n",
"- Standard interfaces: `invoke`, `stream`, `batch`\n",
"- Async interfaces: `astream`, `ainvoke`, `abatch`, `astream_log`, `astream_events`\n",
"\n",
"Other methods are not guaranteed to be fully compatible."
]
},
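{
"cell_type": "markdown",
"id": "d614b7ca",
"metadata": {},
"source": [
"As a quick illustration of one of these interfaces, the sketch below streams the translation chain with the tracer attached; the call is traced just like the `invoke` example above."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e725c8db",
"metadata": {},
"outputs": [],
"source": [
"# Stream the chain output while the tracer records the trace\n",
"chunks = list(chain.stream(test_input, config={\"callbacks\": [mlflow_tracer]}))\n",
"print(\"\".join(chunks))"
]
},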
{
"cell_type": "markdown",
"id": "a72e8854",
"metadata": {},
"source": [
"### MlflowCallbackHandler\n",
"\n",
"`MlflowCallbackHandler` is a callback handler that resides in the LangChain Community code base.\n",
"\n",
"This callback can be passed for chain/agent invocation, but it must be explicitly finished by calling the `flush_tracker()` method.\n",
"\n",
"When a chain is invoked with the callback, it performs the following actions:\n",
"\n",
"1. Creates a new MLflow Run or retrieves an active one if available within the active MLflow Experiment.\n",
"2. Logs metrics such as the number of LLM calls, token usage, and other relevant metrics. If the chain/agent includes LLM call and you have `spacy` library installed, it logs text complexity metrics such as `flesch_kincaid_grade`.\n",
"3. Logs internal steps as a JSON file (this is a legacy version of traces).\n",
"4. Logs chain input and output as a Pandas Dataframe.\n",
"5. Calls the `flush_tracker()` method with a chain/agent instance, logging the chain/agent as an MLflow Model.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b579aae1",
"id": "fd49fd45",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.callbacks import MlflowCallbackHandler\n",
"\n",
"mlflow_callback = MlflowCallbackHandler()\n",
"\n",
"chain.invoke(\"What is LangChain callback?\", config={\"callbacks\": [mlflow_callback]})\n",
"\n",
"mlflow_callback.flush_tracker()"
"from langchain_openai import OpenAI"
]
},
{
"cell_type": "markdown",
"id": "84924e35",
"cell_type": "code",
"execution_count": null,
"id": "578cac8c",
"metadata": {},
"outputs": [],
"source": [
"## References\n",
"To learn more about the feature and visit tutorials and examples of using LangChain with MLflow, please refer to the [MLflow documentation for LangChain integration](https://mlflow.org/docs/latest/llms/langchain/index.html).\n",
"\"\"\"Main function.\n",
"\n",
"`MLflow` also provides several [tutorials](https://mlflow.org/docs/latest/llms/langchain/index.html#getting-started-with-the-mlflow-langchain-flavor-tutorials-and-guides) and [examples](https://github.com/mlflow/mlflow/tree/master/examples/langchain) for the `LangChain` integration:\n",
"- [Quick Start](https://mlflow.org/docs/latest/llms/langchain/notebooks/langchain-quickstart.html)\n",
"- [RAG Tutorial](https://mlflow.org/docs/latest/llms/langchain/notebooks/langchain-retriever.html)\n",
"- [Agent Example](https://github.com/mlflow/mlflow/blob/master/examples/langchain/simple_agent.py)"
"This function is used to try the callback handler.\n",
"Scenarios:\n",
"1. OpenAI LLM\n",
"2. Chain with multiple SubChains on multiple generations\n",
"3. Agent with Tools\n",
"\"\"\"\n",
"mlflow_callback = MlflowCallbackHandler()\n",
"llm = OpenAI(\n",
" model_name=\"gpt-3.5-turbo\", temperature=0, callbacks=[mlflow_callback], verbose=True\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9b20acae",
"metadata": {},
"outputs": [],
"source": [
"# SCENARIO 1 - LLM\n",
"llm_result = llm.generate([\"Tell me a joke\"])\n",
"\n",
"mlflow_callback.flush_tracker(llm)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8b872046",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
"from langchain_core.prompts import PromptTemplate"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1b2627ef",
"metadata": {},
"outputs": [],
"source": [
"# SCENARIO 2 - Chain\n",
"template = \"\"\"You are a playwright. Given the title of play, it is your job to write a synopsis for that title.\n",
"Title: {title}\n",
"Playwright: This is a synopsis for the above play:\"\"\"\n",
"prompt_template = PromptTemplate(input_variables=[\"title\"], template=template)\n",
"synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, callbacks=[mlflow_callback])\n",
"\n",
"test_prompts = [\n",
" {\n",
" \"title\": \"documentary about good video games that push the boundary of game design\"\n",
" },\n",
"]\n",
"synopsis_chain.apply(test_prompts)\n",
"mlflow_callback.flush_tracker(synopsis_chain)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e002823a",
"metadata": {
"id": "_jN73xcPVEpI"
},
"outputs": [],
"source": [
"from langchain.agents import AgentType, initialize_agent, load_tools"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "655bd47e",
"metadata": {
"id": "Gpq4rk6VT9cu"
},
"outputs": [],
"source": [
"# SCENARIO 3 - Agent with Tools\n",
"tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm, callbacks=[mlflow_callback])\n",
"agent = initialize_agent(\n",
" tools,\n",
" llm,\n",
" agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
" callbacks=[mlflow_callback],\n",
" verbose=True,\n",
")\n",
"agent.run(\n",
" \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\"\n",
")\n",
"mlflow_callback.flush_tracker(agent, finish=True)"
]
}
],
@@ -604,9 +191,9 @@
"provenance": []
},
"kernelspec": {
"display_name": "tracing",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "tracing"
"name": "python3"
},
"language_info": {
"codemirror_mode": {
@@ -618,7 +205,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.2"
"version": "3.10.12"
}
},
"nbformat": 4,

View File

@@ -60,7 +60,6 @@
"**PebbloRetrievalQA chain supports the following vector databases:**\n",
"- Qdrant\n",
"- Pinecone\n",
"- Postgres(utilizing the pgvector extension)\n",
"\n",
"\n",
"**Load vector database with authorization and semantic information in metadata:**\n",

View File

@@ -14,18 +14,14 @@ its dependencies running locally.
- Install the Python SDK with `pip install unstructured`.
- You can install document specific dependencies with extras, i.e. `pip install "unstructured[docx]"`.
- To install the dependencies for all document types, use `pip install "unstructured[all-docs]"`.
- Install the following system dependencies if they are not already available on your system with e.g. `brew install` for Mac.
- Install the following system dependencies if they are not already available on your system.
Depending on what document types you're parsing, you may not need all of these.
- `libmagic-dev` (filetype detection)
- `poppler-utils` (images and PDFs)
- `tesseract-ocr`(images and PDFs)
- `qpdf` (PDFs)
- `libreoffice` (MS Office docs)
- `pandoc` (EPUBs)
When running locally, Unstructured also recommends using Docker [by following this guide](https://docs.unstructured.io/open-source/installation/docker-installation)
to ensure all system dependencies are installed correctly.
If you want to get up and running with less setup, you can
simply run `pip install unstructured` and use `UnstructuredAPIFileLoader` or
`UnstructuredAPIFileIOLoader`. That will process your document using the hosted Unstructured API.

View File

@@ -20,16 +20,6 @@
"Let's load the Azure OpenAI Embedding class with environment variables set to indicate to use Azure endpoints."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "228faf0c",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,
@@ -190,9 +180,9 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"display_name": "poetry-venv",
"language": "python",
"name": "python3"
"name": "poetry-venv"
},
"language_info": {
"codemirror_mode": {
@@ -204,7 +194,12 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.9.1"
},
"vscode": {
"interpreter": {
"hash": "7377c2ccc78bc62c2683122d48c8cd1fb85a53850a1b1fc29736ed39852c9885"
}
}
},
"nbformat": 4,

Some files were not shown because too many files have changed in this diff.