Compare commits


3 Commits

Author           SHA1        Message                                             Date
Mason Daugherty  69f36578d3  Merge branch 'master' into mdrxy/post-patch-fixes   2026-01-22 15:15:08 -05:00
Mason Daugherty  bc06782b1a  Merge branch 'master' into mdrxy/post-patch-fixes   2025-12-10 15:05:51 -05:00
Mason Daugherty  48ea9f104b  fix(core): fix for exclude_inputs POST              2025-11-27 02:56:07 -05:00
377 changed files with 22737 additions and 7918 deletions

.github/CODEOWNERS (vendored, 2 lines changed)

@@ -1,3 +1,3 @@
/.github/ @ccurme @eyurtsev @mdrxy
/.github/ @baskaryan @ccurme @eyurtsev
/libs/core/ @eyurtsev
/libs/partners/ @ccurme @mdrxy

View File

@@ -53,6 +53,7 @@ body:
- label: langchain-anthropic
- label: langchain-classic
- label: langchain-core
- label: langchain-cli
- label: langchain-model-profiles
- label: langchain-tests
- label: langchain-text-splitters
@@ -70,16 +71,6 @@ body:
- label: langchain-qdrant
- label: langchain-xai
- label: Other / not sure / general
- type: textarea
id: related
validations:
required: false
attributes:
label: Related Issues / PRs
description: |
If this bug is related to any existing issues or pull requests, please link them here.
placeholder: |
* e.g. #123, #456
- type: textarea
id: reproduction
validations:

View File

@@ -50,6 +50,7 @@ body:
- label: langchain-anthropic
- label: langchain-classic
- label: langchain-core
- label: langchain-cli
- label: langchain-model-profiles
- label: langchain-tests
- label: langchain-text-splitters

View File

@@ -30,6 +30,7 @@ body:
- label: langchain-anthropic
- label: langchain-classic
- label: langchain-core
- label: langchain-cli
- label: langchain-model-profiles
- label: langchain-tests
- label: langchain-text-splitters

View File

@@ -101,6 +101,7 @@ body:
- label: langchain-anthropic
- label: langchain-classic
- label: langchain-core
- label: langchain-cli
- label: langchain-model-profiles
- label: langchain-tests
- label: langchain-text-splitters

View File

@@ -1,7 +1,6 @@
(Replace this entire block of text)
Read the full contributing guidelines: https://docs.langchain.com/oss/python/contributing/overview
If you paste a large clearly AI generated description here your PR may be IGNORED or CLOSED!
Thank you for contributing to LangChain! Follow these steps to have your pull request considered as ready for review.
@@ -24,15 +23,8 @@ Thank you for contributing to LangChain! Follow these steps to have your pull re
- We will not consider a PR unless these three are passing in CI.
4. How did you verify your code works?
Additional guidelines:
- We ask that if you use generative AI for your contribution, you include a disclaimer.
- PRs should not touch more than one package unless absolutely necessary.
- Do not update the `uv.lock` files or add dependencies to `pyproject.toml` files (even optional ones) unless you have explicit permission to do so by a maintainer.
## Social handles (optional)
<!-- If you'd like a shoutout on release, add your socials below -->
Twitter: @
LinkedIn: https://linkedin.com/in/

View File

@@ -17,6 +17,11 @@ langchain:
- any-glob-to-any-file:
- "libs/langchain_v1/**/*"
cli:
- changed-files:
- any-glob-to-any-file:
- "libs/cli/**/*"
standard-tests:
- changed-files:
- any-glob-to-any-file:
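
The `any-glob-to-any-file` rule above applies the `cli` label when at least one changed file matches the glob. A rough Python equivalent of that matching rule, for illustration only (not the labeler's actual implementation):

```python
def should_label_cli(changed_files: list[str]) -> bool:
    # "libs/cli/**/*" matches any file anywhere under libs/cli/
    return any(path.startswith("libs/cli/") for path in changed_files)

assert should_label_cli(["libs/cli/langchain_cli/cli.py"])
assert not should_label_cli(["libs/core/pyproject.toml"])
```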

View File

@@ -56,7 +56,7 @@ def all_package_dirs() -> Set[str]:
return {
"/".join(path.split("/")[:-1]).lstrip("./")
for path in glob.glob("./libs/**/pyproject.toml", recursive=True)
if "libs/standard-tests" not in path
if "libs/cli" not in path and "libs/standard-tests" not in path
}
@@ -286,6 +286,10 @@ if __name__ == "__main__":
dirs_to_run["test"].add("libs/partners/fireworks")
dirs_to_run["test"].add("libs/partners/groq")
elif file.startswith("libs/cli"):
dirs_to_run["lint"].add("libs/cli")
dirs_to_run["test"].add("libs/cli")
elif file.startswith("libs/partners"):
partner_dir = file.split("/")[2]
if os.path.isdir(f"libs/partners/{partner_dir}") and [
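
Taken together, the two hunks exclude `libs/cli` from the generic package matrix and route CLI changes to dedicated lint/test dirs. A minimal standalone sketch of the discovery logic this file uses (paths illustrative):

```python
import glob

def all_package_dirs() -> set[str]:
    # Every libs/ directory containing a pyproject.toml, minus packages
    # excluded from the shared matrix (standard-tests and, now, cli).
    return {
        "/".join(path.split("/")[:-1]).lstrip("./")
        for path in glob.glob("./libs/**/pyproject.toml", recursive=True)
        if "libs/cli" not in path and "libs/standard-tests" not in path
    }
```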

View File

@@ -394,9 +394,10 @@ jobs:
runs-on: ubuntu-latest
permissions:
contents: read
if: false # temporarily skip
strategy:
matrix:
partner: [anthropic]
partner: [openai, anthropic]
fail-fast: false # Continue testing other partners if one fails
env:
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
@@ -539,7 +540,7 @@ jobs:
- test-pypi-publish
- pre-release-checks
- test-dependents
- test-prior-published-packages-against-new-core
# - test-prior-published-packages-against-new-core
# Run if all needed jobs succeeded or were skipped (test-dependents only runs for core/langchain_v1)
if: ${{ !cancelled() && !failure() }}
runs-on: ubuntu-latest

View File

@@ -30,6 +30,7 @@ jobs:
"langchain-anthropic": "anthropic",
"langchain-classic": "langchain-classic",
"langchain-core": "core",
"langchain-cli": "cli",
"langchain-model-profiles": "model-profiles",
"langchain-tests": "standard-tests",
"langchain-text-splitters": "text-splitters",

View File

@@ -1,42 +0,0 @@
# Ensures CLAUDE.md and AGENTS.md stay synchronized.
#
# These files contain the same development guidelines but are named differently
# for compatibility with different AI coding assistants (Claude Code uses CLAUDE.md,
# other tools may use AGENTS.md).
name: "🔄 Check CLAUDE.md / AGENTS.md Sync"
on:
push:
branches: [master]
paths:
- "CLAUDE.md"
- "AGENTS.md"
pull_request:
paths:
- "CLAUDE.md"
- "AGENTS.md"
permissions:
contents: read
jobs:
check-sync:
name: "verify files are identical"
runs-on: ubuntu-latest
steps:
- name: "📋 Checkout Code"
uses: actions/checkout@v6
- name: "🔍 Check CLAUDE.md and AGENTS.md are in sync"
run: |
if ! diff -q CLAUDE.md AGENTS.md > /dev/null 2>&1; then
echo "❌ CLAUDE.md and AGENTS.md are out of sync!"
echo ""
echo "These files must contain identical content."
echo "Differences:"
echo ""
diff --color=always CLAUDE.md AGENTS.md || true
exit 1
fi
echo "✅ CLAUDE.md and AGENTS.md are in sync"

View File

@@ -8,7 +8,7 @@
#
# Examples:
# feat(core): add multitenant support
# fix(langchain): resolve error
# fix(cli): resolve flag parsing error
# docs: update API usage examples
# docs(openai): update API usage examples
#
@@ -27,18 +27,12 @@
# * release — prepare a new release
#
# Allowed Scope(s) (optional):
# core, langchain, langchain-classic, model-profiles,
# core, cli, langchain, langchain-classic, model-profiles,
# standard-tests, text-splitters, docs, anthropic, chroma, deepseek, exa,
# fireworks, groq, huggingface, mistralai, nomic, ollama, openai,
# perplexity, prompty, qdrant, xai, infra, deps
#
# Multiple scopes can be used by separating them with a comma. For example:
#
# feat(core,langchain): add multitenant support to core and langchain
#
# Note: PRs touching the langchain package should use the 'langchain' scope. It is not
# acceptable to omit the scope for changes to the langchain package, despite it being
# the main package & name of the repo.
# Multiple scopes can be used by separating them with a comma.
#
# Rules:
# 1. The 'Type' must start with a lowercase letter.
@@ -85,6 +79,7 @@ jobs:
release
scopes: |
core
cli
langchain
langchain-classic
model-profiles

View File

@@ -32,14 +32,14 @@ jobs:
steps:
- name: Generate GitHub App token
id: app-token
uses: actions/create-github-app-token@v2
uses: actions/create-github-app-token@v1
with:
app-id: ${{ secrets.ORG_MEMBERSHIP_APP_ID }}
private-key: ${{ secrets.ORG_MEMBERSHIP_APP_PRIVATE_KEY }}
- name: Check if contributor is external
id: check-membership
uses: actions/github-script@v8
uses: actions/github-script@v7
with:
github-token: ${{ steps.app-token.outputs.token }}
script: |
@@ -77,7 +77,7 @@ jobs:
- name: Add external label to issue
if: steps.check-membership.outputs.is-external == 'true' && github.event_name == 'issues'
uses: actions/github-script@v8
uses: actions/github-script@v7
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
@@ -95,7 +95,7 @@ jobs:
- name: Add external label to pull request
if: steps.check-membership.outputs.is-external == 'true' && github.event_name == 'pull_request_target'
uses: actions/github-script@v8
uses: actions/github-script@v7
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
@@ -113,7 +113,7 @@ jobs:
- name: Add internal label to issue
if: steps.check-membership.outputs.is-external == 'false' && github.event_name == 'issues'
uses: actions/github-script@v8
uses: actions/github-script@v7
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
@@ -131,7 +131,7 @@ jobs:
- name: Add internal label to pull request
if: steps.check-membership.outputs.is-external == 'false' && github.event_name == 'pull_request_target'
uses: actions/github-script@v8
uses: actions/github-script@v7
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |

View File

@@ -22,6 +22,7 @@ langchain/
│ ├── text-splitters/ # Document chunking utilities
│ ├── standard-tests/ # Shared test suite for integrations
│ ├── model-profiles/ # Model configuration profiles
│ └── cli/ # Command-line interface tools
├── .github/ # CI/CD workflows and templates
├── .vscode/ # VSCode IDE standard settings and recommended extensions
└── README.md # Information about LangChain
@@ -32,7 +33,7 @@ langchain/
- **Integration layer** (`partners/`): Third-party service integrations. Note that this monorepo is not exhaustive of all LangChain integrations; some are maintained in separate repos, such as `langchain-ai/langchain-google` and `langchain-ai/langchain-aws`. Usually these repos are cloned at the same level as this monorepo, so if needed, you can refer to their code directly by navigating to `../langchain-google/` from this monorepo.
- **Testing layer** (`standard-tests/`): Standardized integration tests for partner integrations
### Development tools & commands
### Development tools & commands**
- `uv` Fast Python package installer and resolver (replaces pip/poetry)
- `make` Task runner for common development commands. Feel free to look at the `Makefile` for available commands and usage patterns.
@@ -44,16 +45,6 @@ This monorepo uses `uv` for dependency management. Local development uses editab
Each package in `libs/` has its own `pyproject.toml` and `uv.lock`.
Before running your tests, setup all packages by running:
```bash
# For all groups
uv sync --all-groups
# or, to install a specific group only:
uv sync --group test
```
```bash
# Run unit tests (no network)
make test
@@ -81,15 +72,7 @@ uv run --group lint mypy .
#### Commit standards
Suggest PR titles that follow Conventional Commits format. Refer to .github/workflows/pr_lint for allowed types and scopes. Note that all commit/PR titles should be in lowercase with the exception of proper nouns/named entities. All PR titles should include a scope with no exceptions. For example:
```txt
feat(langchain): add new chat completion feature
fix(core): resolve type hinting issue in vector store
chore(anthropic): update infrastructure dependencies
```
Note how `feat(langchain)` includes a scope even though it is the main package and name of the repo.
Suggest PR titles that follow Conventional Commits format. Refer to .github/workflows/pr_lint for allowed types and scopes.
#### Pull request guidelines
@@ -128,7 +111,7 @@ def filter_unknown_users(users: list[str], known_users: set[str]) -> list[str]:
known_users: Set of known/valid user identifiers.
Returns:
List of users that are not in the `known_users` set.
List of users that are not in the known_users set.
"""
```
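
For reference, a minimal body consistent with that docstring fragment (the guideline shows only the docstring; this implementation is an assumption):

```python
def filter_unknown_users(users: list[str], known_users: set[str]) -> list[str]:
    """Return the users that are not present in the known users set.

    Args:
        users: User identifiers to check.
        known_users: Set of known/valid user identifiers.

    Returns:
        List of users that are not in the `known_users` set.
    """
    # Preserves input order; set membership makes each check O(1).
    return [user for user in users if user not in known_users]
```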
@@ -196,4 +179,4 @@ def send_email(to: str, msg: str, *, priority: str = "normal") -> bool:
## Additional resources
- **Documentation:** https://docs.langchain.com/oss/python/langchain/overview and source at https://github.com/langchain-ai/docs or `../docs/`. Prefer the local install and use file search tools for best results. If needed, use the docs MCP server as defined in `.mcp.json` for programmatic access.
- **Contributing Guide:** [Contributing Guide](https://docs.langchain.com/oss/python/contributing/overview)
- **Contributing Guide:** [`.github/CONTRIBUTING.md`](https://docs.langchain.com/oss/python/contributing/overview)

View File

@@ -22,6 +22,7 @@ langchain/
│ ├── text-splitters/ # Document chunking utilities
│ ├── standard-tests/ # Shared test suite for integrations
│ ├── model-profiles/ # Model configuration profiles
│ └── cli/ # Command-line interface tools
├── .github/ # CI/CD workflows and templates
├── .vscode/ # VSCode IDE standard settings and recommended extensions
└── README.md # Information about LangChain
@@ -32,7 +33,7 @@ langchain/
- **Integration layer** (`partners/`): Third-party service integrations. Note that this monorepo is not exhaustive of all LangChain integrations; some are maintained in separate repos, such as `langchain-ai/langchain-google` and `langchain-ai/langchain-aws`. Usually these repos are cloned at the same level as this monorepo, so if needed, you can refer to their code directly by navigating to `../langchain-google/` from this monorepo.
- **Testing layer** (`standard-tests/`): Standardized integration tests for partner integrations
### Development tools & commands
### Development tools & commands**
- `uv` Fast Python package installer and resolver (replaces pip/poetry)
- `make` Task runner for common development commands. Feel free to look at the `Makefile` for available commands and usage patterns.
@@ -44,16 +45,6 @@ This monorepo uses `uv` for dependency management. Local development uses editab
Each package in `libs/` has its own `pyproject.toml` and `uv.lock`.
Before running your tests, setup all packages by running:
```bash
# For all groups
uv sync --all-groups
# or, to install a specific group only:
uv sync --group test
```
```bash
# Run unit tests (no network)
make test
@@ -81,15 +72,7 @@ uv run --group lint mypy .
#### Commit standards
Suggest PR titles that follow Conventional Commits format. Refer to .github/workflows/pr_lint for allowed types and scopes. Note that all commit/PR titles should be in lowercase with the exception of proper nouns/named entities. All PR titles should include a scope with no exceptions. For example:
```txt
feat(langchain): add new chat completion feature
fix(core): resolve type hinting issue in vector store
chore(anthropic): update infrastructure dependencies
```
Note how `feat(langchain)` includes a scope even though it is the main package and name of the repo.
Suggest PR titles that follow Conventional Commits format. Refer to .github/workflows/pr_lint for allowed types and scopes.
#### Pull request guidelines
@@ -128,7 +111,7 @@ def filter_unknown_users(users: list[str], known_users: set[str]) -> list[str]:
known_users: Set of known/valid user identifiers.
Returns:
List of users that are not in the `known_users` set.
List of users that are not in the known_users set.
"""
```
@@ -196,4 +179,4 @@ def send_email(to: str, msg: str, *, priority: str = "normal") -> bool:
## Additional resources
- **Documentation:** https://docs.langchain.com/oss/python/langchain/overview and source at https://github.com/langchain-ai/docs or `../docs/`. Prefer the local install and use file search tools for best results. If needed, use the docs MCP server as defined in `.mcp.json` for programmatic access.
- **Contributing Guide:** [Contributing Guide](https://docs.langchain.com/oss/python/contributing/overview)
- **Contributing Guide:** [`.github/CONTRIBUTING.md`](https://docs.langchain.com/oss/python/contributing/overview)

View File

@@ -1,15 +0,0 @@
# Contributing to LangChain
Thanks for your interest in contributing to LangChain!
We have moved our contributing guidelines to our documentation site to keep them up-to-date and easy to access.
👉 **[Read the Contributing Guide](https://docs.langchain.com/oss/python/contributing/overview)**
This guide includes instructions on:
- How to set up your development environment
- How to run tests and linting
- How to submit a Pull Request
- Coding standards and best practices
We look forward to your contributions!

View File

@@ -36,7 +36,6 @@ If you're looking for more advanced customization or agent orchestration, check
- [docs.langchain.com](https://docs.langchain.com/oss/python/langchain/overview) Comprehensive documentation, including conceptual overviews and guides
- [reference.langchain.com/python](https://reference.langchain.com/python) API reference docs for LangChain packages
- [Chat LangChain](https://chat.langchain.com/) Chat with the LangChain documentation and get answers to your questions
**Discussions**: Visit the [LangChain Forum](https://forum.langchain.com) to connect with the community and share all of your technical questions, ideas, and feedback.
@@ -62,11 +61,11 @@ While the LangChain framework can be used standalone, it also integrates seamles
To improve your LLM application development, pair LangChain with:
- [Deep Agents](https://github.com/langchain-ai/deepagents) *(new!)* Build agents that can plan, use subagents, and leverage file systems for complex tasks
- [LangGraph](https://docs.langchain.com/oss/python/langgraph/overview) Build agents that can reliably handle complex tasks with LangGraph, our low-level agent orchestration framework. LangGraph offers customizable architecture, long-term memory, and human-in-the-loop workflows and is trusted in production by companies like LinkedIn, Uber, Klarna, and GitLab.
- [Integrations](https://docs.langchain.com/oss/python/integrations/providers/overview) List of LangChain integrations, including chat & embedding models, tools & toolkits, and more
- [LangSmith](https://www.langchain.com/langsmith) Helpful for agent evals and observability. Debug poor-performing LLM app runs, evaluate agent trajectories, gain visibility in production, and improve performance over time.
- [LangSmith Deployment](https://docs.langchain.com/langsmith/deployments) Deploy and scale agents effortlessly with a purpose-built deployment platform for long-running, stateful workflows. Discover, reuse, configure, and share agents across teams and iterate quickly with visual prototyping in [LangSmith Studio](https://docs.langchain.com/langsmith/studio).
- [Deep Agents](https://github.com/langchain-ai/deepagents) *(new!)* Build agents that can plan, use subagents, and leverage file systems for complex tasks
## Additional resources

libs/cli/.gitignore (vendored, new file, 159 lines)

@@ -0,0 +1,159 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
.integration_test

libs/cli/DOCS.md (new file, 189 lines)

@@ -0,0 +1,189 @@
# `langchain`
**Usage**:
```console
$ langchain [OPTIONS] COMMAND [ARGS]...
```
**Options**:
* `--help`: Show this message and exit.
* `-v, --version`: Print current CLI version.
**Commands**:
* `app`: Manage LangChain apps
* `serve`: Start the LangServe app, whether it's a...
* `template`: Develop installable templates.
## `langchain app`
Manage LangChain apps
**Usage**:
```console
$ langchain app [OPTIONS] COMMAND [ARGS]...
```
**Options**:
* `--help`: Show this message and exit.
**Commands**:
* `add`: Adds the specified template to the current...
* `new`: Create a new LangServe application.
* `remove`: Removes the specified package from the...
* `serve`: Starts the LangServe app.
### `langchain app add`
Adds the specified template to the current LangServe app.
e.g.:
langchain app add extraction-openai-functions
langchain app add git+ssh://git@github.com/efriis/simple-pirate.git
**Usage**:
```console
$ langchain app add [OPTIONS] [DEPENDENCIES]...
```
**Arguments**:
* `[DEPENDENCIES]...`: The dependency to add
**Options**:
* `--api-path TEXT`: API paths to add
* `--project-dir PATH`: The project directory
* `--repo TEXT`: Install templates from a specific github repo instead
* `--branch TEXT`: Install templates from a specific branch
* `--help`: Show this message and exit.
### `langchain app new`
Create a new LangServe application.
**Usage**:
```console
$ langchain app new [OPTIONS] NAME
```
**Arguments**:
* `NAME`: The name of the folder to create [required]
**Options**:
* `--package TEXT`: Packages to seed the project with
* `--help`: Show this message and exit.
### `langchain app remove`
Removes the specified package from the current LangServe app.
**Usage**:
```console
$ langchain app remove [OPTIONS] API_PATHS...
```
**Arguments**:
* `API_PATHS...`: The API paths to remove [required]
**Options**:
* `--help`: Show this message and exit.
### `langchain app serve`
Starts the LangServe app.
**Usage**:
```console
$ langchain app serve [OPTIONS]
```
**Options**:
* `--port INTEGER`: The port to run the server on
* `--host TEXT`: The host to run the server on
* `--app TEXT`: The app to run, e.g. `app.server:app`
* `--help`: Show this message and exit.
## `langchain serve`
Start the LangServe app, whether it's a template or an app.
**Usage**:
```console
$ langchain serve [OPTIONS]
```
**Options**:
* `--port INTEGER`: The port to run the server on
* `--host TEXT`: The host to run the server on
* `--help`: Show this message and exit.
## `langchain template`
Develop installable templates.
**Usage**:
```console
$ langchain template [OPTIONS] COMMAND [ARGS]...
```
**Options**:
* `--help`: Show this message and exit.
**Commands**:
* `new`: Creates a new template package.
* `serve`: Starts a demo app for this template.
### `langchain template new`
Creates a new template package.
**Usage**:
```console
$ langchain template new [OPTIONS] NAME
```
**Arguments**:
* `NAME`: The name of the folder to create [required]
**Options**:
* `--with-poetry / --no-poetry`: Don't run poetry install [default: no-poetry]
* `--help`: Show this message and exit.
### `langchain template serve`
Starts a demo app for this template.
**Usage**:
```console
$ langchain template serve [OPTIONS]
```
**Options**:
* `--port INTEGER`: The port to run the server on
* `--host TEXT`: The host to run the server on
* `--help`: Show this message and exit.

libs/cli/LICENSE (new file, 21 lines)

@@ -0,0 +1,21 @@
MIT License
Copyright (c) LangChain, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

libs/cli/Makefile (new file, 53 lines)

@@ -0,0 +1,53 @@
######################
# LINTING AND FORMATTING
######################
.EXPORT_ALL_VARIABLES:
UV_FROZEN = true
# Define a variable for Python and notebook files.
PYTHON_FILES=.
MYPY_CACHE=.mypy_cache
lint format: PYTHON_FILES=.
lint_diff format_diff: PYTHON_FILES=$(shell git diff --relative=libs/cli --name-only --diff-filter=d master | grep -E '\.py$$|\.ipynb$$')
lint_package: PYTHON_FILES=langchain_cli
lint_tests: PYTHON_FILES=tests
lint_tests: MYPY_CACHE=.mypy_cache_test
lint lint_diff lint_package lint_tests:
[ "$(PYTHON_FILES)" = "" ] || uv run --group typing --group lint ruff check $(PYTHON_FILES)
[ "$(PYTHON_FILES)" = "" ] || uv run --group typing --group lint ruff format $(PYTHON_FILES) --diff
[ "$(PYTHON_FILES)" = "" ] || mkdir -p $(MYPY_CACHE) && uv run --group typing --group lint mypy $(PYTHON_FILES) --cache-dir $(MYPY_CACHE)
format format_diff:
[ "$(PYTHON_FILES)" = "" ] || uv run --group typing --group lint ruff format $(PYTHON_FILES)
[ "$(PYTHON_FILES)" = "" ] || uv run --group typing --group lint ruff check --fix $(PYTHON_FILES)
test tests: _test _e2e_test
PYTHON = .venv/bin/python
_test:
uv run --group test pytest tests
# custom integration testing for cli integration flow
# currently ignores vectorstores test because lacks implementation
_e2e_test:
rm -rf .integration_test
mkdir .integration_test
cd .integration_test && \
python3 -m venv .venv && \
$(PYTHON) -m pip install --upgrade uv && \
$(PYTHON) -m pip install -e .. && \
$(PYTHON) -m langchain_cli.cli integration new --name parrot-link --name-class ParrotLink && \
$(PYTHON) -m langchain_cli.cli integration new --name parrot-link --name-class ParrotLinkB --src=integration_template/chat_models.py --dst=langchain-parrot-link/langchain_parrot_link/chat_models_b.py && \
$(PYTHON) -m langchain_cli.cli integration create-doc --name parrot-link --name-class ParrotLinkB --component-type ChatModel --destination-dir langchain-parrot-link/docs && \
cd langchain-parrot-link && \
unset UV_FROZEN && \
unset VIRTUAL_ENV && \
uv sync && \
uv add --editable ../../../standard-tests && \
make format lint tests && \
uv add --editable ../../../core && \
make integration_test

libs/cli/README.md (new file, 30 lines)

@@ -0,0 +1,30 @@
# langchain-cli
[![PyPI - Version](https://img.shields.io/pypi/v/langchain-cli?label=%20)](https://pypi.org/project/langchain-cli/#history)
[![PyPI - License](https://img.shields.io/pypi/l/langchain-cli)](https://opensource.org/licenses/MIT)
[![PyPI - Downloads](https://img.shields.io/pepy/dt/langchain-cli)](https://pypistats.org/packages/langchain-cli)
[![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langchain.svg?style=social&label=Follow%20%40LangChain)](https://x.com/langchain)
## Quick Install
```bash
pip install langchain-cli
```
## 🤔 What is this?
This package implements the official CLI for LangChain. Right now, it is most useful for getting started with LangChain Templates!
## 📖 Documentation
[CLI Docs](https://github.com/langchain-ai/langchain/blob/master/libs/cli/DOCS.md)
## 📕 Releases & Versioning
See our [Releases](https://docs.langchain.com/oss/python/release-policy) and [Versioning](https://docs.langchain.com/oss/python/versioning) policies.
## 💁 Contributing
As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.
For detailed information on how to contribute, see the [Contributing Guide](https://docs.langchain.com/oss/python/contributing/overview).

View File

@@ -0,0 +1,7 @@
"""LangChain CLI."""
from langchain_cli._version import __version__
__all__ = [
"__version__",
]

View File

@@ -0,0 +1,10 @@
from importlib import metadata
try:
__version__ = metadata.version(__package__)
except metadata.PackageNotFoundError:
# Case where package metadata is not available.
__version__ = ""
del metadata # optional, avoids polluting the results of dir(__package__)
__all__ = ["__version__"]
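
Assuming the package is importable, the fallback above means `__version__` degrades to an empty string instead of raising when distribution metadata is missing:

```python
from langchain_cli import __version__

print(__version__ or "(metadata unavailable)")
```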

View File

@@ -0,0 +1,88 @@
"""LangChain CLI."""
from __future__ import annotations
from typing import Annotated
import typer
from langchain_cli._version import __version__
from langchain_cli.namespaces import app as app_namespace
from langchain_cli.namespaces import integration as integration_namespace
from langchain_cli.namespaces import template as template_namespace
from langchain_cli.namespaces.migrate import main as migrate_namespace
from langchain_cli.utils.packages import get_langserve_export, get_package_root
app = typer.Typer(no_args_is_help=True, add_completion=False)
app.add_typer(
template_namespace.package_cli,
name="template",
help=template_namespace.__doc__,
)
app.add_typer(app_namespace.app_cli, name="app", help=app_namespace.__doc__)
app.add_typer(
integration_namespace.integration_cli,
name="integration",
help=integration_namespace.__doc__,
)
app.command(
name="migrate",
context_settings={
# Let Grit handle the arguments
"allow_extra_args": True,
"ignore_unknown_options": True,
},
)(
migrate_namespace.migrate,
)
def _version_callback(*, show_version: bool) -> None:
if show_version:
typer.echo(f"langchain-cli {__version__}")
raise typer.Exit
@app.callback()
def _main(
*,
version: bool = typer.Option(
False, # noqa: FBT003
"--version",
"-v",
help="Print the current CLI version.",
callback=_version_callback,
is_eager=True,
),
) -> None:
pass
@app.command()
def serve(
*,
port: Annotated[
int | None,
typer.Option(help="The port to run the server on"),
] = None,
host: Annotated[
str | None,
typer.Option(help="The host to run the server on"),
] = None,
) -> None:
"""Start the LangServe app, whether it's a template or an app."""
try:
project_dir = get_package_root()
pyproject = project_dir / "pyproject.toml"
get_langserve_export(pyproject)
except (KeyError, FileNotFoundError):
# not a template
app_namespace.serve(port=port, host=host)
else:
# is a template
template_namespace.serve(port=port, host=host)
if __name__ == "__main__":
app()

View File

@@ -0,0 +1,5 @@
"""LangChain CLI constants."""
DEFAULT_GIT_REPO = "https://github.com/langchain-ai/langchain.git"
DEFAULT_GIT_SUBDIRECTORY = "templates"
DEFAULT_GIT_REF = "master"

View File

@@ -0,0 +1,70 @@
"""Development Scripts for template packages."""
from collections.abc import Sequence
from typing import Literal
from fastapi import FastAPI
from langserve import add_routes
from langchain_cli.utils.packages import get_langserve_export, get_package_root
def create_demo_server(
*,
config_keys: Sequence[str] = (),
playground_type: Literal["default", "chat"] = "default",
) -> FastAPI:
"""Create a demo server for the current template.
Args:
config_keys: Optional sequence of config keys to expose in the playground.
playground_type: The type of playground to use.
Returns:
The demo server.
Raises:
KeyError: If the `pyproject.toml` file is missing required fields.
ImportError: If the module defined in `pyproject.toml` cannot be imported.
"""
app = FastAPI()
package_root = get_package_root()
pyproject = package_root / "pyproject.toml"
try:
package = get_langserve_export(pyproject)
mod = __import__(package["module"], fromlist=[package["attr"]])
chain = getattr(mod, package["attr"])
add_routes(
app,
chain,
config_keys=config_keys,
playground_type=playground_type,
)
except KeyError as e:
msg = "Missing fields from pyproject.toml"
raise KeyError(msg) from e
except ImportError as e:
msg = "Could not import module defined in pyproject.toml"
raise ImportError(msg) from e
return app
def create_demo_server_configurable() -> FastAPI:
"""Create a configurable demo server.
Returns:
The configurable demo server.
"""
return create_demo_server(config_keys=["configurable"])
def create_demo_server_chat() -> FastAPI:
"""Create a chat demo server.
Returns:
The chat demo server.
"""
return create_demo_server(playground_type="chat")
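
`get_langserve_export` is not shown in this diff; from how its result is consumed here (`package["module"]`, `package["attr"]`) and the exceptions `serve` catches, a plausible sketch looks like this (the `[tool.langserve]` table name and key names are assumptions):

```python
import tomllib
from pathlib import Path

def get_langserve_export(pyproject: Path) -> dict[str, str]:
    # FileNotFoundError if pyproject.toml is absent, KeyError if the
    # expected table/keys are missing -- the cases `serve` handles above.
    with pyproject.open("rb") as f:
        data = tomllib.load(f)
    table = data["tool"]["langserve"]  # assumed table name
    return {"module": table["export_module"], "attr": table["export_attr"]}
```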

View File

@@ -0,0 +1 @@
__pycache__

View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2024 LangChain, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -0,0 +1,58 @@
.PHONY: all format lint test tests integration_tests help extended_tests
# Default target executed when no arguments are given to make.
all: help
# Define a variable for the test file path.
TEST_FILE ?= tests/unit_tests/
integration_test integration_tests: TEST_FILE = tests/integration_tests/
# unit tests are run with the --disable-socket flag to prevent network calls
test tests:
uv run pytest --disable-socket --allow-unix-socket $(TEST_FILE)
test_watch:
uv run ptw --snapshot-update --now . -- -vv $(TEST_FILE)
# integration tests are run without the --disable-socket flag to allow network calls
integration_test integration_tests:
uv run pytest $(TEST_FILE)
######################
# LINTING AND FORMATTING
######################
# Define a variable for Python and notebook files.
PYTHON_FILES=.
MYPY_CACHE=.mypy_cache
lint format: PYTHON_FILES=.
lint_diff format_diff: PYTHON_FILES=$(shell git diff --relative=libs/partners/__package_name_short__ --name-only --diff-filter=d master | grep -E '\.py$$|\.ipynb$$')
lint_package: PYTHON_FILES=__module_name__
lint_tests: PYTHON_FILES=tests
lint_tests: MYPY_CACHE=.mypy_cache_test
lint lint_diff lint_package lint_tests:
[ "$(PYTHON_FILES)" = "" ] || uv run ruff check $(PYTHON_FILES)
[ "$(PYTHON_FILES)" = "" ] || uv run ruff format $(PYTHON_FILES) --diff
[ "$(PYTHON_FILES)" = "" ] || mkdir -p $(MYPY_CACHE) && uv run mypy $(PYTHON_FILES) --cache-dir $(MYPY_CACHE)
format format_diff:
[ "$(PYTHON_FILES)" = "" ] || uv run ruff format $(PYTHON_FILES)
[ "$(PYTHON_FILES)" = "" ] || uv run ruff check --fix $(PYTHON_FILES)
check_imports: $(shell find __module_name__ -name '*.py')
uv run python ./scripts/check_imports.py $^
######################
# HELP
######################
help:
@echo '----'
@echo 'check_imports - check imports'
@echo 'format - run code formatters'
@echo 'lint - run linters'
@echo 'test - run unit tests'
@echo 'tests - run unit tests'
@echo 'test TEST_FILE=<test_file> - run all tests in file'

View File

@@ -0,0 +1,46 @@
# __package_name__
This package contains the LangChain integration with __ModuleName__
## Installation
```bash
pip install -U __package_name__
```
And you should configure credentials by setting the following environment variables:
* TODO: fill this out
## Chat Models
`Chat__ModuleName__` class exposes chat models from __ModuleName__.
```python
from __module_name__ import Chat__ModuleName__
model = Chat__ModuleName__()
model.invoke("Sing a ballad of LangChain.")
```
## Embeddings
`__ModuleName__Embeddings` class exposes embeddings from __ModuleName__.
```python
from __module_name__ import __ModuleName__Embeddings
embeddings = __ModuleName__Embeddings()
embeddings.embed_query("What is the meaning of life?")
```
## LLMs
`__ModuleName__LLM` class exposes LLMs from __ModuleName__.
```python
from __module_name__ import __ModuleName__LLM
model = __ModuleName__LLM()
model.invoke("The meaning of life is")
```

View File

@@ -0,0 +1,264 @@
{
"cells": [
{
"cell_type": "raw",
"id": "afaf8039",
"metadata": {},
"source": [
"---\n",
"sidebar_label: __ModuleName__\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "e49f1e0d",
"metadata": {},
"source": [
"# Chat__ModuleName__\n",
"\n",
"- TODO: Make sure API reference link is correct.\n",
"\n",
"This will help you get started with __ModuleName__ [chat models](/docs/concepts/chat_models). For detailed documentation of all Chat__ModuleName__ features and configurations head to the [API reference](https://python.langchain.com/api_reference/__package_name_short_snake__/chat_models/__module_name__.chat_models.Chat__ModuleName__.html).\n",
"\n",
"- TODO: Add any other relevant links, like information about models, prices, context windows, etc. See https://python.langchain.com/docs/integrations/chat/openai/ for an example.\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"- TODO: Fill in table features.\n",
"- TODO: Remove JS support link if not relevant, otherwise ensure link is correct.\n",
"- TODO: Make sure API reference links are correct.\n",
"\n",
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/__package_name_short_snake__) | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
"| [Chat__ModuleName__](https://python.langchain.com/api_reference/__package_name_short_snake__/chat_models/__module_name__.chat_models.Chat__ModuleName__.html) | [__package_name__](https://python.langchain.com/api_reference/__package_name_short_snake__/) | ✅/❌ | beta/❌ | ✅/❌ | ![PyPI - Downloads](https://img.shields.io/pypi/dm/__package_name__&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/__package_name__&label=%20) |\n",
"\n",
"### Model features\n",
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ |\n",
"\n",
"## Setup\n",
"\n",
"- TODO: Update with relevant info.\n",
"\n",
"To access __ModuleName__ models you'll need to create a/an __ModuleName__ account, get an API key, and install the `__package_name__` integration package.\n",
"\n",
"### Credentials\n",
"\n",
"- TODO: Update with relevant info.\n",
"\n",
"Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the __MODULE_NAME___API_KEY environment variable:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"if not os.getenv(\"__MODULE_NAME___API_KEY\"):\n",
" os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\n",
" \"Enter your __ModuleName__ API key: \"\n",
" )"
]
},
{
"cell_type": "markdown",
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
"metadata": {},
"source": [
"To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
]
},
{
"cell_type": "markdown",
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"The LangChain __ModuleName__ integration lives in the `__package_name__` package:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU __package_name__"
]
},
{
"cell_type": "markdown",
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our model object and generate chat completions:\n",
"\n",
"- TODO: Update model instantiation with relevant params."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
"metadata": {},
"outputs": [],
"source": [
"from __module_name__ import Chat__ModuleName__\n",
"\n",
"model = Chat__ModuleName__(\n",
" model=\"model-name\",\n",
" temperature=0,\n",
" max_tokens=None,\n",
" timeout=None,\n",
" max_retries=2,\n",
" # other params...\n",
")"
]
},
{
"cell_type": "markdown",
"id": "2b4f3e15",
"metadata": {},
"source": [
"## Invocation\n",
"\n",
"- TODO: Run cells so output can be seen."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "62e0dbc3",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"messages = [\n",
" (\n",
" \"system\",\n",
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
" ),\n",
" (\"human\", \"I love programming.\"),\n",
"]\n",
"ai_msg = model.invoke(messages)\n",
"ai_msg"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
"metadata": {},
"outputs": [],
"source": [
"print(ai_msg.content)"
]
},
{
"cell_type": "markdown",
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
"metadata": {},
"source": [
"## Chaining\n",
"\n",
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:\n",
"\n",
"- TODO: Run cells so output can be seen."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate(\n",
" [\n",
" (\n",
" \"system\",\n",
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
" ),\n",
" (\"human\", \"{input}\"),\n",
" ]\n",
")\n",
"\n",
"chain = prompt | model\n",
"chain.invoke(\n",
" {\n",
" \"input_language\": \"English\",\n",
" \"output_language\": \"German\",\n",
" \"input\": \"I love programming.\",\n",
" }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
"metadata": {},
"source": [
"## TODO: Any functionality specific to this model provider\n",
"\n",
"E.g. creating/using finetuned models via this provider. Delete if not relevant."
]
},
{
"cell_type": "markdown",
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all Chat__ModuleName__ features and configurations head to the [API reference](https://python.langchain.com/api_reference/__package_name_short_snake__/chat_models/__module_name__.chat_models.Chat__ModuleName__.html)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,219 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"---\n",
"sidebar_label: __ModuleName__\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# __ModuleName__Loader\n",
"\n",
"- TODO: Make sure API reference link is correct.\n",
"\n",
"This notebook provides a quick overview for getting started with __ModuleName__ [document loader](https://python.langchain.com/docs/concepts/document_loaders). For detailed documentation of all __ModuleName__Loader features and configurations head to the [API reference](https://python.langchain.com/v0.2/api_reference/community/document_loaders/langchain_community.document_loaders.__module_name___loader.__ModuleName__Loader.html).\n",
"\n",
"- TODO: Add any other relevant links, like information about underlying API, etc.\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"- TODO: Fill in table features.\n",
"- TODO: Remove JS support link if not relevant, otherwise ensure link is correct.\n",
"- TODO: Make sure API reference links are correct.\n",
"\n",
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/document_loaders/web_loaders/__module_name___loader)|\n",
"| :--- | :--- | :---: | :---: | :---: |\n",
"| [__ModuleName__Loader](https://python.langchain.com/v0.2/api_reference/community/document_loaders/langchain_community.document_loaders.__module_name__loader.__ModuleName__Loader.html) | [langchain_community](https://api.python.langchain.com/en/latest/community_api_reference.html) | ✅/❌ | beta/❌ | ✅/❌ | \n",
"### Loader features\n",
"| Source | Document Lazy Loading | Native Async Support\n",
"| :---: | :---: | :---: | \n",
"| __ModuleName__Loader | ✅/❌ | ✅/❌ | \n",
"\n",
"## Setup\n",
"\n",
"- TODO: Update with relevant info.\n",
"\n",
"To access __ModuleName__ document loader you'll need to install the `__package_name__` integration package, and create a **ModuleName** account and get an API key.\n",
"\n",
"### Credentials\n",
"\n",
"- TODO: Update with relevant info.\n",
"\n",
"Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the __MODULE_NAME___API_KEY environment variable:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\n",
" \"Enter your __ModuleName__ API key: \"\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"Install **langchain_community**.\n",
"\n",
"- TODO: Add any other required packages"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain_community"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialization\n",
"\n",
"Now we can instantiate our model object and load documents:\n",
"\n",
"- TODO: Update model instantiation with relevant params."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders import __ModuleName__Loader\n",
"\n",
"loader = __ModuleName__Loader(\n",
" # required params = ...\n",
" # optional params = ...\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Load\n",
"\n",
"- TODO: Run cells to show loading capabilities"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"docs = loader.load()\n",
"docs[0]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(docs[0].metadata)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Lazy Load\n",
"\n",
"- TODO: Run cells to show lazy loading capabilities. Delete if lazy loading is not implemented."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"page = []\n",
"for doc in loader.lazy_load():\n",
" page.append(doc)\n",
" if len(page) >= 10:\n",
" # do some paged operation, e.g.\n",
" # index.upsert(page)\n",
"\n",
" page = []"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## TODO: Any functionality specific to this document loader\n",
"\n",
"E.g. using specific configs for different loading behavior. Delete if not relevant."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all __ModuleName__Loader features and configurations head to the API reference: https://python.langchain.com/v0.2/api_reference/community/document_loaders/langchain_community.document_loaders.__module_name___loader.__ModuleName__Loader.html"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@@ -0,0 +1,238 @@
{
"cells": [
{
"cell_type": "raw",
"id": "67db2992",
"metadata": {},
"source": [
"---\n",
"sidebar_label: __ModuleName__\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "9597802c",
"metadata": {},
"source": [
"# __ModuleName__LLM\n",
"\n",
"- [ ] TODO: Make sure API reference link is correct\n",
"\n",
"This will help you get started with __ModuleName__ completion models (LLMs) using LangChain. For detailed documentation on `__ModuleName__LLM` features and configuration options, please refer to the [API reference](https://api.python.langchain.com/en/latest/llms/__module_name__.llms.__ModuleName__LLM.html).\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"- TODO: Fill in table features.\n",
"- TODO: Remove JS support link if not relevant, otherwise ensure link is correct.\n",
"- TODO: Make sure API reference links are correct.\n",
"\n",
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/llms/__package_name_short_snake__) | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
"| [__ModuleName__LLM](https://api.python.langchain.com/en/latest/llms/__module_name__.llms.__ModuleName__LLM.html) | [__package_name__](https://api.python.langchain.com/en/latest/__package_name_short_snake___api_reference.html) | ✅/❌ | beta/❌ | ✅/❌ | ![PyPI - Downloads](https://img.shields.io/pypi/dm/__package_name__&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/__package_name__&label=%20) |\n",
"\n",
"## Setup\n",
"\n",
"- TODO: Update with relevant info.\n",
"\n",
"To access __ModuleName__ models you'll need to create a/an __ModuleName__ account, get an API key, and install the `__package_name__` integration package.\n",
"\n",
"### Credentials\n",
"\n",
"- TODO: Update with relevant info.\n",
"\n",
"Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the __MODULE_NAME___API_KEY environment variable:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bc51e756",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"if not os.getenv(\"__MODULE_NAME___API_KEY\"):\n",
" os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\n",
" \"Enter your __ModuleName__ API key: \"\n",
" )"
]
},
{
"cell_type": "markdown",
"id": "4b6e1ca6",
"metadata": {},
"source": [
"To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "196c2b41",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
]
},
{
"cell_type": "markdown",
"id": "809c6577",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"The LangChain __ModuleName__ integration lives in the `__package_name__` package:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "59c710c4",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU __package_name__"
]
},
{
"cell_type": "markdown",
"id": "0a760037",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our model object and generate chat completions:\n",
"\n",
"- TODO: Update model instantiation with relevant params."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a0562a13",
"metadata": {},
"outputs": [],
"source": [
"from __module_name__ import __ModuleName__LLM\n",
"\n",
"model = __ModuleName__LLM(\n",
" model=\"model-name\",\n",
" temperature=0,\n",
" max_tokens=None,\n",
" timeout=None,\n",
" max_retries=2,\n",
" # other params...\n",
")"
]
},
{
"cell_type": "markdown",
"id": "0ee90032",
"metadata": {},
"source": [
"## Invocation\n",
"\n",
"- [ ] TODO: Run cells so output can be seen."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "035dea0f",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"input_text = \"__ModuleName__ is an AI company that \"\n",
"\n",
"completion = model.invoke(input_text)\n",
"completion"
]
},
{
"cell_type": "markdown",
"id": "add38532",
"metadata": {},
"source": [
"## Chaining\n",
"\n",
"We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:\n",
"\n",
"- TODO: Run cells so output can be seen."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "078e9db2",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import PromptTemplate\n",
"\n",
"prompt = PromptTemplate(\"How to say {input} in {output_language}:\\n\")\n",
"\n",
"chain = prompt | model\n",
"chain.invoke(\n",
" {\n",
" \"output_language\": \"German\",\n",
" \"input\": \"I love programming.\",\n",
" }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "e99eef30",
"metadata": {},
"source": [
"## TODO: Any functionality specific to this model provider\n",
"\n",
"E.g. creating/using finetuned models via this provider. Delete if not relevant"
]
},
{
"cell_type": "markdown",
"id": "e9bdfcef",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all `__ModuleName__LLM` features and configurations head to the API reference: https://api.python.langchain.com/en/latest/llms/__module_name__.llms.__ModuleName__LLM.html"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.11.1 64-bit",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.7"
},
"vscode": {
"interpreter": {
"hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1"
}
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,50 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# __ModuleName__\n",
"\n",
"__ModuleName__ is a platform that offers..."
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"id": "y8ku6X96sebl"
},
"outputs": [],
"source": [
"from __module_name__ import Chat__ModuleName__\n",
"from __module_name__ import __ModuleName__LLM\n",
"from __module_name__ import __ModuleName__VectorStore"
]
}
],
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.11"
}
},
"nbformat": 4,
"nbformat_minor": 1
}

View File

@@ -0,0 +1,245 @@
{
"cells": [
{
"cell_type": "raw",
"id": "afaf8039",
"metadata": {},
"source": [
"---\n",
"sidebar_label: __ModuleName__\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "e49f1e0d",
"metadata": {},
"source": [
"# __ModuleName__Retriever\n",
"\n",
"- TODO: Make sure API reference link is correct.\n",
"\n",
"This will help you get started with the __ModuleName__ [retriever](/docs/concepts/retrievers). For detailed documentation of all __ModuleName__Retriever features and configurations head to the [API reference](https://api.python.langchain.com/en/latest/retrievers/__module_name__.retrievers.__ModuleName__.__ModuleName__Retriever.html).\n",
"\n",
"### Integration details\n",
"\n",
"TODO: Select one of the tables below, as appropriate.\n",
"\n",
"1: Bring-your-own data (i.e., index and search a custom corpus of documents):\n",
"\n",
"| Retriever | Self-host | Cloud offering | Package |\n",
"| :--- | :--- | :---: | :---: |\n",
"[__ModuleName__Retriever](https://api.python.langchain.com/en/latest/retrievers/__package_name__.retrievers.__module_name__.__ModuleName__Retriever.html) | ❌ | ❌ | __package_name__ |\n",
"\n",
"2: External index (e.g., constructed from Internet data or similar)):\n",
"\n",
"| Retriever | Source | Package |\n",
"| :--- | :--- | :---: |\n",
"[__ModuleName__Retriever](https://api.python.langchain.com/en/latest/retrievers/__package_name__.retrievers.__module_name__.__ModuleName__Retriever.html) | Source description | __package_name__ |\n",
"\n",
"## Setup\n",
"\n",
"- TODO: Update with relevant info."
]
},
{
"cell_type": "markdown",
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
"metadata": {},
"source": [
"If you want to get automated tracing from individual queries, you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
]
},
{
"cell_type": "markdown",
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"This retriever lives in the `__package_name__` package:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU __package_name__"
]
},
{
"cell_type": "markdown",
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our retriever:\n",
"\n",
"- TODO: Update model instantiation with relevant params."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "70cc8e65-2a02-408a-bbc6-8ef649057d82",
"metadata": {},
"outputs": [],
"source": [
"from __module_name__ import __ModuleName__Retriever\n",
"\n",
"retriever = __ModuleName__Retriever(\n",
" # ...\n",
")"
]
},
{
"cell_type": "markdown",
"id": "5c5f2839-4020-424e-9fc9-07777eede442",
"metadata": {},
"source": [
"## Usage"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "51a60dbe-9f2e-4e04-bb62-23968f17164a",
"metadata": {},
"outputs": [],
"source": [
"query = \"...\"\n",
"\n",
"retriever.invoke(query)"
]
},
{
"cell_type": "markdown",
"id": "dfe8aad4-8626-4330-98a9-7ea1ca5d2e0e",
"metadata": {},
"source": [
"## Use within a chain\n",
"\n",
"Like other retrievers, __ModuleName__Retriever can be incorporated into LLM applications via [chains](/docs/how_to/sequence/).\n",
"\n",
"We will need a LLM or chat model:\n",
"\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
"<ChatModelTabs customVarName=\"llm\" />"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "25b647a3-f8f2-4541-a289-7a241e43f9df",
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "23e11cc9-abd6-4855-a7eb-799f45ca01ae",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"prompt = ChatPromptTemplate.from_template(\n",
" \"\"\"Answer the question based only on the context provided.\n",
"\n",
"Context: {context}\n",
"\n",
"Question: {question}\"\"\"\n",
")\n",
"\n",
"\n",
"def format_docs(docs):\n",
" return \"\\n\\n\".join(doc.page_content for doc in docs)\n",
"\n",
"\n",
"chain = (\n",
" {\"context\": retriever | format_docs, \"question\": RunnablePassthrough()}\n",
" | prompt\n",
" | model\n",
" | StrOutputParser()\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d47c37dd-5c11-416c-a3b6-bec413cd70e8",
"metadata": {},
"outputs": [],
"source": [
"chain.invoke(\"...\")"
]
},
{
"cell_type": "markdown",
"id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
"metadata": {},
"source": [
"## TODO: Any functionality or considerations specific to this retriever\n",
"\n",
"Fill in or delete if not relevant."
]
},
{
"cell_type": "markdown",
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all __ModuleName__Retriever features and configurations head to the [API reference](https://api.python.langchain.com/en/latest/retrievers/__module_name__.retrievers.__ModuleName__.__ModuleName__Retriever.html)."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,204 @@
{
"cells": [
{
"cell_type": "raw",
"metadata": {
"vscode": {
"languageId": "raw"
}
},
"source": [
"---\n",
"sidebar_label: __ModuleName__ByteStore\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# __ModuleName__ByteStore\n",
"\n",
"- TODO: Make sure API reference link is correct.\n",
"\n",
"This will help you get started with __ModuleName__ [key-value stores](/docs/concepts/#key-value-stores). For detailed documentation of all __ModuleName__ByteStore features and configurations head to the [API reference](https://python.langchain.com/v0.2/api_reference/core/stores/langchain_core.stores.__module_name__ByteStore.html).\n",
"\n",
"- TODO: Add any other relevant links, like information about models, prices, context windows, etc. See https://python.langchain.com/docs/integrations/stores/in_memory/ for an example.\n",
"\n",
"## Overview\n",
"\n",
"- TODO: (Optional) A short introduction to the underlying technology/API.\n",
"\n",
"### Integration details\n",
"\n",
"- TODO: Fill in table features.\n",
"- TODO: Remove JS support link if not relevant, otherwise ensure link is correct.\n",
"- TODO: Make sure API reference links are correct.\n",
"\n",
"| Class | Package | Local | [JS support](https://js.langchain.com/docs/integrations/stores/_package_name_) | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: |\n",
"| [__ModuleName__ByteStore](https://api.python.langchain.com/en/latest/stores/__module_name__.stores.__ModuleName__ByteStore.html) | [__package_name__](https://api.python.langchain.com/en/latest/__package_name_short_snake___api_reference.html) | ✅/❌ | ✅/❌ | ![PyPI - Downloads](https://img.shields.io/pypi/dm/__package_name__&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/__package_name__&label=%20) |\n",
"\n",
"## Setup\n",
"\n",
"- TODO: Update with relevant info.\n",
"\n",
"To create a __ModuleName__ byte store, you'll need to create a/an __ModuleName__ account, get an API key, and install the `__package_name__` integration package.\n",
"\n",
"### Credentials\n",
"\n",
"- TODO: Update with relevant info, or omit if the service does not require any credentials.\n",
"\n",
"Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the __MODULE_NAME___API_KEY environment variable:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"if not os.getenv(\"__MODULE_NAME___API_KEY\"):\n",
" os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\n",
" \"Enter your __ModuleName__ API key: \"\n",
" )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"The LangChain __ModuleName__ integration lives in the `__package_name__` package:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU __package_name__"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our byte store:\n",
"\n",
"- TODO: Update model instantiation with relevant params."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from __module_name__ import __ModuleName__ByteStore\n",
"\n",
"kv_store = __ModuleName__ByteStore(\n",
" # params...\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Usage\n",
"\n",
"- TODO: Run cells so output can be seen.\n",
"\n",
"You can set data under keys like this using the `mset` method:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"kv_store.mset(\n",
" [\n",
" [\"key1\", b\"value1\"],\n",
" [\"key2\", b\"value2\"],\n",
" ]\n",
")\n",
"\n",
"kv_store.mget(\n",
" [\n",
" \"key1\",\n",
" \"key2\",\n",
" ]\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"And you can delete data using the `mdelete` method:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"kv_store.mdelete(\n",
" [\n",
" \"key1\",\n",
" \"key2\",\n",
" ]\n",
")\n",
"\n",
"kv_store.mget(\n",
" [\n",
" \"key1\",\n",
" \"key2\",\n",
" ]\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## TODO: Any functionality specific to this key-value store provider\n",
"\n",
"E.g. extra initialization. Delete if not relevant."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all __ModuleName__ByteStore features and configurations, head to the API reference: https://api.python.langchain.com/en/latest/stores/__module_name__.stores.__ModuleName__ByteStore.html"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"name": "python",
"version": "3.10.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -0,0 +1,246 @@
{
"cells": [
{
"cell_type": "raw",
"id": "afaf8039",
"metadata": {},
"source": [
"---\n",
"sidebar_label: __ModuleName__\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "9a3d6f34",
"metadata": {},
"source": [
"# __ModuleName__Embeddings\n",
"\n",
"- [ ] TODO: Make sure API reference link is correct\n",
"\n",
"This will help you get started with __ModuleName__ embedding models using LangChain. For detailed documentation on `__ModuleName__Embeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/v0.2/api_reference/__package_name_short__/embeddings/__module_name__.embeddings__ModuleName__Embeddings.html).\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"| Provider | Package |\n",
"|:--------:|:-------:|\n",
"| [__ModuleName__](/docs/integrations/providers/__package_name_short__/) | [__package_name__](https://python.langchain.com/v0.2/api_reference/__module_name__/embeddings/__module_name__.embeddings__ModuleName__Embeddings.html) |\n",
"\n",
"## Setup\n",
"\n",
"- [ ] TODO: Update with relevant info.\n",
"\n",
"To access __ModuleName__ embedding models you'll need to create a/an __ModuleName__ account, get an API key, and install the `__package_name__` integration package.\n",
"\n",
"### Credentials\n",
"\n",
"- TODO: Update with relevant info.\n",
"\n",
"Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the __MODULE_NAME___API_KEY environment variable:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "36521c2a",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"if not os.getenv(\"__MODULE_NAME___API_KEY\"):\n",
" os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\n",
" \"Enter your __ModuleName__ API key: \"\n",
" )"
]
},
{
"cell_type": "markdown",
"id": "c84fb993",
"metadata": {},
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
},
{
"cell_type": "code",
"execution_count": null,
"id": "39a4953b",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
]
},
{
"cell_type": "markdown",
"id": "d9664366",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"The LangChain __ModuleName__ integration lives in the `__package_name__` package:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "64853226",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU __package_name__"
]
},
{
"cell_type": "markdown",
"id": "45dd1724",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our model object and generate chat completions:\n",
"\n",
"- TODO: Update model instantiation with relevant params."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9ea7a09b",
"metadata": {},
"outputs": [],
"source": [
"from __module_name__ import __ModuleName__Embeddings\n",
"\n",
"embeddings = __ModuleName__Embeddings(\n",
" model=\"model-name\",\n",
")"
]
},
{
"cell_type": "markdown",
"id": "77d271b6",
"metadata": {},
"source": [
"## Indexing and Retrieval\n",
"\n",
"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
"\n",
"Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d817716b",
"metadata": {},
"outputs": [],
"source": [
"# Create a vector store with a sample text\n",
"from langchain_core.vectorstores import InMemoryVectorStore\n",
"\n",
"text = \"LangChain is the framework for building context-aware reasoning applications\"\n",
"\n",
"vectorstore = InMemoryVectorStore.from_texts(\n",
" [text],\n",
" embedding=embeddings,\n",
")\n",
"\n",
"# Use the vectorstore as a retriever\n",
"retriever = vectorstore.as_retriever()\n",
"\n",
"# Retrieve the most similar text\n",
"retrieved_documents = retriever.invoke(\"What is LangChain?\")\n",
"\n",
"# show the retrieved document's content\n",
"retrieved_documents[0].page_content"
]
},
{
"cell_type": "markdown",
"id": "e02b9855",
"metadata": {},
"source": [
"## Direct Usage\n",
"\n",
"Under the hood, the vectorstore and retriever implementations are calling `embeddings.embed_documents(...)` and `embeddings.embed_query(...)` to create embeddings for the text(s) used in `from_texts` and retrieval `invoke` operations, respectively.\n",
"\n",
"You can directly call these methods to get embeddings for your own use cases.\n",
"\n",
"### Embed single texts\n",
"\n",
"You can embed single texts or documents with `embed_query`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0d2befcd",
"metadata": {},
"outputs": [],
"source": [
"single_vector = embeddings.embed_query(text)\n",
"print(str(single_vector)[:100]) # Show the first 100 characters of the vector"
]
},
{
"cell_type": "markdown",
"id": "1b5a7d03",
"metadata": {},
"source": [
"### Embed multiple texts\n",
"\n",
"You can embed multiple texts with `embed_documents`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2f4d6e97",
"metadata": {},
"outputs": [],
"source": [
"text2 = (\n",
" \"LangGraph is a library for building stateful, multi-actor applications with LLMs\"\n",
")\n",
"two_vectors = embeddings.embed_documents([text, text2])\n",
"for vector in two_vectors:\n",
" print(str(vector)[:100]) # Show the first 100 characters of the vector"
]
},
{
"cell_type": "markdown",
"id": "98785c12",
"metadata": {},
"source": [
"## API Reference\n",
"\n",
"For detailed documentation on `__ModuleName__Embeddings` features and configuration options, please refer to the [API reference](https://api.python.langchain.com/en/latest/embeddings/__module_name__.embeddings.__ModuleName__Embeddings.html).\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,199 @@
{
"cells": [
{
"cell_type": "raw",
"id": "afaf8039",
"metadata": {},
"source": [
"---\n",
"sidebar_label: __ModuleName__\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "e49f1e0d",
"metadata": {},
"source": [
"# __ModuleName__Toolkit\n",
"\n",
"- TODO: Make sure API reference link is correct.\n",
"\n",
"This will help you get started with the __ModuleName__ [toolkit](/docs/concepts/tools/#toolkits). For detailed documentation of all __ModuleName__Toolkit features and configurations head to the [API reference](https://api.python.langchain.com/en/latest/agent_toolkits/__module_name__.agent_toolkits.__ModuleName__.toolkit.__ModuleName__Toolkit.html).\n",
"\n",
"## Setup\n",
"\n",
"- TODO: Update with relevant info."
]
},
{
"cell_type": "markdown",
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
"metadata": {},
"source": "To enable automated tracing of individual tools, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
},
{
"cell_type": "code",
"execution_count": null,
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
]
},
{
"cell_type": "markdown",
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"This toolkit lives in the `__package_name__` package:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU __package_name__"
]
},
{
"cell_type": "markdown",
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our toolkit:\n",
"\n",
"- TODO: Update model instantiation with relevant params."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
"metadata": {},
"outputs": [],
"source": [
"from __module_name__ import __ModuleName__Toolkit\n",
"\n",
"toolkit = __ModuleName__Toolkit(\n",
" # ...\n",
")"
]
},
{
"cell_type": "markdown",
"id": "5c5f2839-4020-424e-9fc9-07777eede442",
"metadata": {},
"source": [
"## Tools\n",
"\n",
"View available tools:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "51a60dbe-9f2e-4e04-bb62-23968f17164a",
"metadata": {},
"outputs": [],
"source": [
"toolkit.get_tools()"
]
},
{
"cell_type": "markdown",
"id": "d11245ad-3661-4405-8558-1188896347ec",
"metadata": {},
"source": [
"TODO: list API reference pages for individual tools."
]
},
{
"cell_type": "markdown",
"id": "dfe8aad4-8626-4330-98a9-7ea1ca5d2e0e",
"metadata": {},
"source": [
"## Use within an agent"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "310bf18e-6c9a-4072-b86e-47bc1fcca29d",
"metadata": {},
"outputs": [],
"source": [
"from langgraph.prebuilt import create_react_agent\n",
"\n",
"agent_executor = create_react_agent(llm, tools)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "23e11cc9-abd6-4855-a7eb-799f45ca01ae",
"metadata": {},
"outputs": [],
"source": [
"example_query = \"...\"\n",
"\n",
"events = agent_executor.stream(\n",
" {\"messages\": [(\"user\", example_query)]},\n",
" stream_mode=\"values\",\n",
")\n",
"for event in events:\n",
" event[\"messages\"][-1].pretty_print()"
]
},
{
"cell_type": "markdown",
"id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
"metadata": {},
"source": [
"## TODO: Any functionality or considerations specific to this toolkit\n",
"\n",
"Fill in or delete if not relevant."
]
},
{
"cell_type": "markdown",
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all __ModuleName__Toolkit features and configurations head to the [API reference](https://api.python.langchain.com/en/latest/agent_toolkits/__module_name__.agent_toolkits.__ModuleName__.toolkit.__ModuleName__Toolkit.html)."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,271 @@
{
"cells": [
{
"cell_type": "raw",
"id": "10238e62-3465-4973-9279-606cbb7ccf16",
"metadata": {},
"source": [
"---\n",
"sidebar_label: __ModuleName__\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "a6f91f20",
"metadata": {},
"source": [
"# __ModuleName__\n",
"\n",
"- TODO: Make sure API reference link is correct.\n",
"\n",
"This notebook provides a quick overview for getting started with __ModuleName__ [tool](/docs/integrations/tools/). For detailed documentation of all __ModuleName__ features and configurations head to the [API reference](https://python.langchain.com/v0.2/api_reference/community/tools/langchain_community.tools.__module_name__.tool.__ModuleName__.html).\n",
"\n",
"- TODO: Add any other relevant links, like information about underlying API, etc.\n",
"\n",
"## Overview\n",
"\n",
"### Integration details\n",
"\n",
"- TODO: Make sure links and features are correct\n",
"\n",
"| Class | Package | Serializable | [JS support](https://js.langchain.com/docs/integrations/tools/__module_name__) | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: |\n",
"| [__ModuleName__](https://python.langchain.com/v0.2/api_reference/community/tools/langchain_community.tools.__module_name__.tool.__ModuleName__.html) | [langchain-community](https://api.python.langchain.com/en/latest/community_api_reference.html) | beta/❌ | ✅/❌ | ![PyPI - Version](https://img.shields.io/pypi/v/langchain-community&label=%20) |\n",
"\n",
"### Tool features\n",
"\n",
"- TODO: Add feature table if it makes sense\n",
"\n",
"\n",
"## Setup\n",
"\n",
"- TODO: Add any additional deps\n",
"\n",
"The integration lives in the `langchain-community` package."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f85b4089",
"metadata": {},
"outputs": [],
"source": [
"%pip install --quiet -U langchain-community"
]
},
{
"cell_type": "markdown",
"id": "b15e9266",
"metadata": {},
"source": [
"### Credentials\n",
"\n",
"- TODO: Add any credentials that are needed"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "e0b178a2-8816-40ca-b57c-ccdd86dde9c9",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"# if not os.environ.get(\"__MODULE_NAME___API_KEY\"):\n",
"# os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\"__MODULE_NAME__ API key:\\n\")"
]
},
{
"cell_type": "markdown",
"id": "bc5ab717-fd27-4c59-b912-bdd099541478",
"metadata": {},
"source": [
"It's also helpful (but not needed) to set up [LangSmith](https://smith.langchain.com/) for best-in-class observability:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "a6c2f136-6367-4f1f-825d-ae741e1bf281",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "markdown",
"id": "1c97218f-f366-479d-8bf7-fe9f2f6df73f",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"- TODO: Fill in instantiation params\n",
"\n",
"Here we show how to instantiate an instance of the __ModuleName__ tool, with "
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "8b3ddfe9-ca79-494c-a7ab-1f56d9407a64",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.tools import __ModuleName__\n",
"\n",
"\n",
"tool = __ModuleName__(...)"
]
},
{
"cell_type": "markdown",
"id": "74147a1a",
"metadata": {},
"source": [
"## Invocation\n",
"\n",
"### [Invoke directly with args](/docs/concepts/tools/#use-the-tool-directly)\n",
"\n",
"- TODO: Describe what the tool args are, fill them in, run cell"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "65310a8b-eb0c-4d9e-a618-4f4abe2414fc",
"metadata": {},
"outputs": [],
"source": [
"tool.invoke({...})"
]
},
{
"cell_type": "markdown",
"id": "d6e73897",
"metadata": {},
"source": [
"### [Invoke with ToolCall](/docs/concepts/tool_calling/#tool-execution)\n",
"\n",
"We can also invoke the tool with a model-generated ToolCall, in which case a ToolMessage will be returned:\n",
"\n",
"- TODO: Fill in tool args and run cell"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f90e33a7",
"metadata": {},
"outputs": [],
"source": [
"# This is usually generated by a model, but we'll create a tool call directly for demo purposes.\n",
"model_generated_tool_call = {\n",
" \"args\": {...}, # TODO: FILL IN\n",
" \"id\": \"1\",\n",
" \"name\": tool.name,\n",
" \"type\": \"tool_call\",\n",
"}\n",
"tool.invoke(model_generated_tool_call)"
]
},
{
"cell_type": "markdown",
"id": "659f9fbd-6fcf-445f-aa8c-72d8e60154bd",
"metadata": {},
"source": [
"## Use within an agent\n",
"\n",
"- TODO: Add user question and run cells\n",
"\n",
"We can use our tool in an [agent](/docs/concepts/agents/). For this we will need a LLM with [tool-calling](/docs/how_to/tool_calling/) capabilities:\n",
"\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
"<ChatModelTabs customVarName=\"llm\" />\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "af3123ad-7a02-40e5-b58e-7d56e23e5830",
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"# !pip install -qU langchain langchain-openai\n",
"from langchain.chat_models import init_chat_model\n",
"\n",
"model = init_chat_model(model=\"gpt-4o\", model_provider=\"openai\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bea35fa1",
"metadata": {},
"outputs": [],
"source": [
"from langgraph.prebuilt import create_react_agent\n",
"\n",
"tools = [tool]\n",
"agent = create_react_agent(model, tools)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fdbf35b5-3aaf-4947-9ec6-48c21533fb95",
"metadata": {},
"outputs": [],
"source": [
"example_query = \"...\"\n",
"\n",
"events = agent.stream(\n",
" {\"messages\": [(\"user\", example_query)]},\n",
" stream_mode=\"values\",\n",
")\n",
"for event in events:\n",
" event[\"messages\"][-1].pretty_print()"
]
},
{
"cell_type": "markdown",
"id": "4ac8146c",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all __ModuleName__ features and configurations head to the API reference: https://python.langchain.com/v0.2/api_reference/community/tools/langchain_community.tools.__module_name__.tool.__ModuleName__.html"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "poetry-venv-311",
"language": "python",
"name": "poetry-venv-311"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,333 @@
{
"cells": [
{
"cell_type": "raw",
"id": "1957f5cb",
"metadata": {},
"source": [
"---\n",
"sidebar_label: __ModuleName__\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "ef1f0986",
"metadata": {},
"source": [
"# __ModuleName__VectorStore\n",
"\n",
"This notebook covers how to get started with the __ModuleName__ vector store."
]
},
{
"cell_type": "markdown",
"id": "36fdc060",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"- TODO: Update with relevant info.\n",
"- TODO: Update minimum version to be correct.\n",
"\n",
"To access __ModuleName__ vector stores you'll need to create a/an __ModuleName__ account, get an API key, and install the `__package_name__` integration package."
]
},
{
"cell_type": "raw",
"id": "64e28aa6",
"metadata": {
"vscode": {
"languageId": "raw"
}
},
"source": [
"%pip install -qU \"__package_name__>=MINIMUM_VERSION\""
]
},
{
"cell_type": "markdown",
"id": "9695dee7",
"metadata": {},
"source": [
"### Credentials\n",
"\n",
"- TODO: Update with relevant info.\n",
"\n",
"Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the __MODULE_NAME___API_KEY environment variable:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "894c30e4",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"if not os.getenv(\"__MODULE_NAME___API_KEY\"):\n",
" os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\n",
" \"Enter your __ModuleName__ API key: \"\n",
" )"
]
},
{
"cell_type": "markdown",
"id": "7f98392b",
"metadata": {},
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
},
{
"cell_type": "code",
"execution_count": null,
"id": "e7b6a6e0",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
]
},
{
"cell_type": "markdown",
"id": "93df377e",
"metadata": {},
"source": [
"## Initialization\n",
"\n",
"- TODO: Fill out with relevant init params\n",
"\n",
"\n",
"```{=mdx}\n",
"import EmbeddingTabs from \"@theme/EmbeddingTabs\";\n",
"\n",
"<EmbeddingTabs/>\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dc37144c-208d-4ab3-9f3a-0407a69fe052",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from __module_name__.vectorstores import __ModuleName__VectorStore\n",
"\n",
"vector_store = __ModuleName__VectorStore(embeddings=embeddings)"
]
},
{
"cell_type": "markdown",
"id": "ac6071d4",
"metadata": {},
"source": [
"## Manage vector store\n",
"\n",
"### Add items to vector store\n",
"\n",
"- TODO: Edit and then run code cell to generate output"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "17f5efc0",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.documents import Document\n",
"\n",
"document_1 = Document(page_content=\"foo\", metadata={\"source\": \"https://example.com\"})\n",
"\n",
"document_2 = Document(page_content=\"bar\", metadata={\"source\": \"https://example.com\"})\n",
"\n",
"document_3 = Document(page_content=\"baz\", metadata={\"source\": \"https://example.com\"})\n",
"\n",
"documents = [document_1, document_2, document_3]\n",
"\n",
"vector_store.add_documents(documents=documents, ids=[\"1\", \"2\", \"3\"])"
]
},
{
"cell_type": "markdown",
"id": "c738c3e0",
"metadata": {},
"source": [
"### Update items in vector store\n",
"\n",
"- TODO: Edit and then run code cell to generate output"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f0aa8b71",
"metadata": {},
"outputs": [],
"source": [
"updated_document = Document(\n",
" page_content=\"qux\", metadata={\"source\": \"https://another-example.com\"}\n",
")\n",
"\n",
"vector_store.update_documents(document_id=\"1\", document=updated_document)"
]
},
{
"cell_type": "markdown",
"id": "dcf1b905",
"metadata": {},
"source": [
"### Delete items from vector store\n",
"\n",
"- TODO: Edit and then run code cell to generate output"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ef61e188",
"metadata": {},
"outputs": [],
"source": [
"vector_store.delete(ids=[\"3\"])"
]
},
{
"cell_type": "markdown",
"id": "c3620501",
"metadata": {},
"source": [
"## Query vector store\n",
"\n",
"Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent.\n",
"\n",
"### Query directly\n",
"\n",
"Performing a simple similarity search can be done as follows:\n",
"\n",
"- TODO: Edit and then run code cell to generate output"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "aa0a16fa",
"metadata": {},
"outputs": [],
"source": [
"results = vector_store.similarity_search(\n",
" query=\"thud\", k=1, filter={\"source\": \"https://another-example.com\"}\n",
")\n",
"for doc in results:\n",
" print(f\"* {doc.page_content} [{doc.metadata}]\")"
]
},
{
"cell_type": "markdown",
"id": "3ed9d733",
"metadata": {},
"source": [
"If you want to execute a similarity search and receive the corresponding scores you can run:\n",
"\n",
"- TODO: Edit and then run code cell to generate output"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5efd2eaa",
"metadata": {},
"outputs": [],
"source": [
"results = vector_store.similarity_search_with_score(\n",
" query=\"thud\", k=1, filter={\"source\": \"https://example.com\"}\n",
")\n",
"for doc, score in results:\n",
" print(f\"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]\")"
]
},
{
"cell_type": "markdown",
"id": "0c235cdc",
"metadata": {},
"source": [
"### Query by turning into retriever\n",
"\n",
"You can also transform the vector store into a retriever for easier usage in your chains.\n",
"\n",
"- TODO: Edit and then run code cell to generate output"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f3460093",
"metadata": {},
"outputs": [],
"source": [
"retriever = vector_store.as_retriever(search_type=\"mmr\", search_kwargs={\"k\": 1})\n",
"retriever.invoke(\"thud\")"
]
},
{
"cell_type": "markdown",
"id": "901c75dc",
"metadata": {},
"source": [
"## Usage for retrieval-augmented generation\n",
"\n",
"For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n",
"\n",
"- [Tutorials](/docs/tutorials/)\n",
"- [How-to: Question and answer with RAG](https://python.langchain.com/docs/how_to/#qa-with-rag)\n",
"- [Retrieval conceptual docs](https://python.langchain.com/docs/concepts/retrieval/)"
]
},
{
"cell_type": "markdown",
"id": "069f1b5f",
"metadata": {},
"source": [
"## TODO: Any functionality specific to this vector store\n",
"\n",
"E.g. creating a persistent database to save to your disk, etc."
]
},
{
"cell_type": "markdown",
"id": "8a27244f",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all __ModuleName__VectorStore features and configurations head to the API reference: https://api.python.langchain.com/en/latest/vectorstores/__module_name__.vectorstores.__ModuleName__VectorStore.html"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,27 @@
from importlib import metadata

from __module_name__.chat_models import Chat__ModuleName__
from __module_name__.document_loaders import __ModuleName__Loader
from __module_name__.embeddings import __ModuleName__Embeddings
from __module_name__.retrievers import __ModuleName__Retriever
from __module_name__.toolkits import __ModuleName__Toolkit
from __module_name__.tools import __ModuleName__Tool
from __module_name__.vectorstores import __ModuleName__VectorStore

try:
    __version__ = metadata.version(__package__)
except metadata.PackageNotFoundError:
    # Case where package metadata is not available.
    __version__ = ""
del metadata  # optional, avoids polluting the results of dir(__package__)

__all__ = [
    "Chat__ModuleName__",
    "__ModuleName__VectorStore",
    "__ModuleName__Embeddings",
    "__ModuleName__Loader",
    "__ModuleName__Retriever",
    "__ModuleName__Toolkit",
    "__ModuleName__Tool",
    "__version__",
]

View File

@@ -0,0 +1,423 @@
"""__ModuleName__ chat models."""
from typing import Any, Dict, Iterator, List
from langchain_core.callbacks import (
CallbackManagerForLLMRun,
)
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
)
from langchain_core.messages.ai import UsageMetadata
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from pydantic import Field
class Chat__ModuleName__(BaseChatModel):
# TODO: Replace all TODOs in docstring. See example docstring:
# https://github.com/langchain-ai/langchain/blob/7ff05357bac6eaedf5058a2af88f23a1817d40fe/libs/partners/openai/langchain_openai/chat_models/base.py#L1120
"""__ModuleName__ chat model integration.
The default implementation echoes the first `parrot_buffer_length` characters of
the input.
# TODO: Replace with relevant packages, env vars.
Setup:
Install `__package_name__` and set environment variable
`__MODULE_NAME___API_KEY`.
```bash
pip install -U __package_name__
export __MODULE_NAME___API_KEY="your-api-key"
```
# TODO: Populate with relevant params.
Key init args — completion params:
model:
Name of __ModuleName__ model to use.
temperature:
Sampling temperature.
max_tokens:
Max number of tokens to generate.
# TODO: Populate with relevant params.
Key init args — client params:
timeout:
Timeout for requests.
max_retries:
Max number of retries.
api_key:
__ModuleName__ API key. If not passed in will be read from env var
__MODULE_NAME___API_KEY.
See full list of supported init args and their descriptions in the params section.
# TODO: Replace with relevant init params.
Instantiate:
```python
from __module_name__ import Chat__ModuleName__
model = Chat__ModuleName__(
model="...",
temperature=0,
max_tokens=None,
timeout=None,
max_retries=2,
# api_key="...",
# other params...
)
```
Invoke:
```python
messages = [
("system", "You are a helpful translator. Translate the user sentence to French."),
("human", "I love programming."),
]
model.invoke(messages)
```
```python
# TODO: Example output.
```
# TODO: Delete if token-level streaming isn't supported.
Stream:
```python
for chunk in model.stream(messages):
print(chunk.text, end="")
```
```python
# TODO: Example output.
```
```python
stream = model.stream(messages)
full = next(stream)
for chunk in stream:
full += chunk
full
```
```python
# TODO: Example output.
```
# TODO: Delete if native async isn't supported.
Async:
```python
await model.ainvoke(messages)
# stream:
# async for chunk in (await model.astream(messages))
# batch:
# await model.abatch([messages])
```
```python
# TODO: Example output.
```
# TODO: Delete if .bind_tools() isn't supported.
Tool calling:
```python
from pydantic import BaseModel, Field
class GetWeather(BaseModel):
'''Get the current weather in a given location'''
location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
class GetPopulation(BaseModel):
'''Get the current population in a given location'''
location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
model_with_tools = model.bind_tools([GetWeather, GetPopulation])
ai_msg = model_with_tools.invoke("Which city is hotter today and which is bigger: LA or NY?")
ai_msg.tool_calls
```
```python
# TODO: Example output.
```
See `Chat__ModuleName__.bind_tools()` method for more.
# TODO: Delete if .with_structured_output() isn't supported.
Structured output:
```python
from pydantic import BaseModel, Field
class Joke(BaseModel):
'''Joke to tell user.'''
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
rating: int | None = Field(description="How funny the joke is, from 1 to 10")
structured_model = model.with_structured_output(Joke)
structured_model.invoke("Tell me a joke about cats")
```
```python
# TODO: Example output.
```
See `Chat__ModuleName__.with_structured_output()` for more.
# TODO: Delete if JSON mode response format isn't supported.
JSON mode:
```python
# TODO: Replace with appropriate bind arg.
json_model = model.bind(response_format={"type": "json_object"})
ai_msg = json_model.invoke("Return a JSON object with key 'random_ints' and a value of 10 random ints in [0-99]")
ai_msg.content
```
```python
# TODO: Example output.
```
# TODO: Delete if image inputs aren't supported.
Image input:
```python
import base64
import httpx
from langchain_core.messages import HumanMessage
image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")
# TODO: Replace with appropriate message content format.
message = HumanMessage(
content=[
{"type": "text", "text": "describe the weather in this image"},
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
},
],
)
ai_msg = model.invoke([message])
ai_msg.content
```
```python
# TODO: Example output.
```
# TODO: Delete if audio inputs aren't supported.
Audio input:
```python
# TODO: Example input
```
```python
# TODO: Example output
```
# TODO: Delete if video inputs aren't supported.
Video input:
```python
# TODO: Example input
```
```python
# TODO: Example output
```
# TODO: Delete if token usage metadata isn't supported.
Token usage:
```python
ai_msg = model.invoke(messages)
ai_msg.usage_metadata
```
```python
{'input_tokens': 28, 'output_tokens': 5, 'total_tokens': 33}
```
# TODO: Delete if logprobs aren't supported.
Logprobs:
```python
# TODO: Replace with appropriate bind arg.
logprobs_model = model.bind(logprobs=True)
ai_msg = logprobs_model.invoke(messages)
ai_msg.response_metadata["logprobs"]
```
```python
# TODO: Example output.
```
Response metadata
```python
ai_msg = model.invoke(messages)
ai_msg.response_metadata
```
```python
# TODO: Example output.
```
""" # noqa: E501
model_name: str = Field(alias="model")
"""The name of the model"""
parrot_buffer_length: int
"""The number of characters from the last message of the prompt to be echoed."""
temperature: float | None = None
max_tokens: int | None = None
timeout: int | None = None
stop: list[str] | None = None
max_retries: int = 2
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "chat-__package_name_short__"
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Return a dictionary of identifying parameters.
This information is used by the LangChain callback system, which
is used for tracing purposes and makes it possible to monitor LLMs.
"""
return {
# The model name allows users to specify custom token counting
# rules in LLM monitoring applications (e.g., in LangSmith users
# can provide per token pricing for their model and monitor
# costs for the given LLM.)
"model_name": self.model_name,
}
def _generate(
self,
messages: List[BaseMessage],
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> ChatResult:
"""Override the _generate method to implement the chat model logic.
This can be a call to an API, a call to a local model, or any other
implementation that generates a response to the input prompt.
Args:
messages: the prompt composed of a list of messages.
stop: a list of strings on which the model should stop generating.
If generation stops due to a stop token, the stop token itself
SHOULD BE INCLUDED as part of the output. This is not enforced
across models right now, but it's a good practice to follow since
it makes it much easier to parse the output of the model
downstream and understand why generation stopped.
run_manager: A run manager with callbacks for the LLM.
"""
# Replace this with actual logic to generate a response from a list
# of messages.
last_message = messages[-1]
tokens = last_message.content[: self.parrot_buffer_length]
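# NOTE: this echo implementation ignores `stop`. Per the docstring above, a
# real implementation would truncate at the first stop sequence and include
# the stop token itself in the output; an illustrative sketch:
#
#     for seq in stop or []:
#         idx = tokens.find(seq)
#         if idx != -1:
#             tokens = tokens[: idx + len(seq)]
#             break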
ct_input_tokens = sum(len(message.content) for message in messages)
ct_output_tokens = len(tokens)
message = AIMessage(
content=tokens,
additional_kwargs={}, # Used to add additional payload to the message
response_metadata={ # Use for response metadata
"time_in_seconds": 3,
"model_name": self.model_name,
},
usage_metadata={
"input_tokens": ct_input_tokens,
"output_tokens": ct_output_tokens,
"total_tokens": ct_input_tokens + ct_output_tokens,
},
)
generation = ChatGeneration(message=message)
return ChatResult(generations=[generation])
def _stream(
self,
messages: List[BaseMessage],
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
"""Stream the output of the model.
This method should be implemented if the model can generate output
in a streaming fashion. If the model does not support streaming,
do not implement it. In that case streaming requests will be automatically
handled by the _generate method.
Args:
messages: the prompt composed of a list of messages.
stop: a list of strings on which the model should stop generating.
If generation stops due to a stop token, the stop token itself
SHOULD BE INCLUDED as part of the output. This is not enforced
across models right now, but it's a good practice to follow since
it makes it much easier to parse the output of the model
downstream and understand why generation stopped.
run_manager: A run manager with callbacks for the LLM.
"""
last_message = messages[-1]
tokens = str(last_message.content[: self.parrot_buffer_length])
ct_input_tokens = sum(len(message.content) for message in messages)
for token in tokens:
usage_metadata = UsageMetadata(
{
"input_tokens": ct_input_tokens,
"output_tokens": 1,
"total_tokens": ct_input_tokens + 1,
}
)
ct_input_tokens = 0
chunk = ChatGenerationChunk(
message=AIMessageChunk(content=token, usage_metadata=usage_metadata)
)
if run_manager:
# This is optional in newer versions of LangChain
# The on_llm_new_token will be called automatically
run_manager.on_llm_new_token(token, chunk=chunk)
yield chunk
# Let's add some other information (e.g., response metadata)
chunk = ChatGenerationChunk(
message=AIMessageChunk(
content="",
response_metadata={"time_in_sec": 3, "model_name": self.model_name},
)
)
if run_manager:
# This is optional in newer versions of LangChain
# The on_llm_new_token will be called automatically
run_manager.on_llm_new_token("", chunk=chunk)
yield chunk
# TODO: Implement if Chat__ModuleName__ supports async streaming. Otherwise delete.
# async def _astream(
# self,
# messages: List[BaseMessage],
# stop: list[str] | None = None,
# run_manager: AsyncCallbackManagerForLLMRun | None = None,
# **kwargs: Any,
# ) -> AsyncIterator[ChatGenerationChunk]:
# TODO: Implement if Chat__ModuleName__ supports async generation. Otherwise delete.
# async def _agenerate(
# self,
# messages: List[BaseMessage],
# stop: list[str] | None = None,
# run_manager: AsyncCallbackManagerForLLMRun | None = None,
# **kwargs: Any,
# ) -> ChatResult:

View File

@@ -0,0 +1,74 @@
"""__ModuleName__ document loader."""
from typing import Iterator
from langchain_core.document_loaders.base import BaseLoader
from langchain_core.documents import Document
class __ModuleName__Loader(BaseLoader):
# TODO: Replace all TODOs in docstring. See example docstring:
# https://github.com/langchain-ai/langchain/blob/869523ad728e6b76d77f170cce13925b4ebc3c1e/libs/community/langchain_community/document_loaders/recursive_url_loader.py#L54
"""
__ModuleName__ document loader integration
# TODO: Replace with relevant packages, env vars.
Setup:
Install `__package_name__` and set environment variable
`__MODULE_NAME___API_KEY`.
```bash
pip install -U __package_name__
export __MODULE_NAME___API_KEY="your-api-key"
```
# TODO: Replace with relevant init params.
Instantiate:
```python
from __module_name__ import __ModuleName__Loader
loader = __ModuleName__Loader(
# required params = ...
# other params = ...
)
```
Lazy load:
```python
docs = []
docs_lazy = loader.lazy_load()
# async variant:
# docs_lazy = await loader.alazy_load()
for doc in docs_lazy:
docs.append(doc)
print(docs[0].page_content[:100])
print(docs[0].metadata)
```
```python
TODO: Example output
```
# TODO: Delete if async load is not implemented
Async load:
```python
docs = await loader.aload()
print(docs[0].page_content[:100])
print(docs[0].metadata)
```
```python
TODO: Example output
```
"""
# TODO: This method must be implemented to load documents.
# Do not implement load(); a default implementation is already available.
def lazy_load(self) -> Iterator[Document]:
raise NotImplementedError()
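# An illustrative sketch only (not part of the template), assuming a
# hypothetical `self._client` that pages through provider records:
#
#     def lazy_load(self) -> Iterator[Document]:
#         for record in self._client.iter_records():
#             yield Document(
#                 page_content=record["text"],
#                 metadata={"source": record["id"]},
#             )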
# TODO: Implement if you would like to change default BaseLoader implementation
# async def alazy_load(self) -> AsyncIterator[Document]:

View File

@@ -0,0 +1,96 @@
from typing import List
from langchain_core.embeddings import Embeddings
class __ModuleName__Embeddings(Embeddings):
"""__ModuleName__ embedding model integration.
# TODO: Replace with relevant packages, env vars.
Setup:
Install `__package_name__` and set environment variable
`__MODULE_NAME___API_KEY`.
```bash
pip install -U __package_name__
export __MODULE_NAME___API_KEY="your-api-key"
```
# TODO: Populate with relevant params.
Key init args — embedding params:
model: str
Name of __ModuleName__ model to use.
See full list of supported init args and their descriptions in the params section.
# TODO: Replace with relevant init params.
Instantiate:
```python
from __module_name__ import __ModuleName__Embeddings
embed = __ModuleName__Embeddings(
model="...",
# api_key="...",
# other params...
)
```
Embed single text:
```python
input_text = "The meaning of life is 42"
embed.embed_query(input_text)
```
```python
# TODO: Example output.
```
Embed multiple texts:
```python
input_texts = ["Document 1...", "Document 2..."]
embed.embed_documents(input_texts)
```
```python
# TODO: Example output.
```
# TODO: Delete if native async isn't supported.
Async:
```python
await embed.aembed_query(input_text)
# multiple:
# await embed.aembed_documents(input_texts)
```
```python
# TODO: Example output.
```
"""
def __init__(self, model: str):
self.model = model
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed search docs."""
return [[0.5, 0.6, 0.7] for _ in texts]
def embed_query(self, text: str) -> List[float]:
"""Embed query text."""
return self.embed_documents([text])[0]
# optional: add custom async implementations here
# you can also delete these, and the base class will
# use the default implementation, which calls the sync
# version in an async executor:
# async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
# """Asynchronously embed search docs."""
# ...
# async def aembed_query(self, text: str) -> List[float]:
# """Asynchronously embed query text."""
# ...

View File

@@ -0,0 +1,107 @@
"""__ModuleName__ retrievers."""
from typing import Any, List
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class __ModuleName__Retriever(BaseRetriever):
# TODO: Replace all TODOs in docstring. See example docstring:
# https://github.com/langchain-ai/langchain/blob/master/libs/community/langchain_community/retrievers/tavily_search_api.py#L17
"""__ModuleName__ retriever.
# TODO: Replace with relevant packages, env vars, etc.
Setup:
Install `__package_name__` and set environment variable
`__MODULE_NAME___API_KEY`.
```bash
pip install -U __package_name__
export __MODULE_NAME___API_KEY="your-api-key"
```
# TODO: Populate with relevant params.
Key init args:
arg 1: type
description
arg 2: type
description
# TODO: Replace with relevant init params.
Instantiate:
```python
from __package_name__ import __ModuleName__Retriever
retriever = __ModuleName__Retriever(
# ...
)
```
Usage:
```python
query = "..."
retriever.invoke(query)
```
```txt
# TODO: Example output.
```
Use within a chain:
```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
prompt = ChatPromptTemplate.from_template(
\"\"\"Answer the question based only on the context provided.
Context: {context}
Question: {question}\"\"\"
)
model = ChatOpenAI(model="gpt-3.5-turbo-0125")
def format_docs(docs):
return "\\n\\n".join(doc.page_content for doc in docs)
chain = (
{"context": retriever | format_docs, "question": RunnablePassthrough()}
| prompt
| model
| StrOutputParser()
)
chain.invoke("...")
```
```txt
# TODO: Example output.
```
"""
k: int = 3
# TODO: This method must be implemented to retrieve documents.
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any
) -> List[Document]:
k = kwargs.get("k", self.k)
return [
Document(page_content=f"Result {i} for query: {query}") for i in range(k)
]
# optional: add custom async implementations here
# async def _aget_relevant_documents(
# self,
# query: str,
# *,
# run_manager: AsyncCallbackManagerForRetrieverRun,
# **kwargs: Any,
# ) -> List[Document]: ...

View File

@@ -0,0 +1,73 @@
"""__ModuleName__ toolkits."""
from typing import List
from langchain_core.tools import BaseTool, BaseToolkit
class __ModuleName__Toolkit(BaseToolkit):
# TODO: Replace all TODOs in docstring. See example docstring:
# https://github.com/langchain-ai/langchain/blob/c123cb2b304f52ab65db4714eeec46af69a861ec/libs/community/langchain_community/agent_toolkits/sql/toolkit.py#L19
"""__ModuleName__ toolkit.
# TODO: Replace with relevant packages, env vars, etc.
Setup:
Install `__package_name__` and set environment variable
`__MODULE_NAME___API_KEY`.
```bash
pip install -U __package_name__
export __MODULE_NAME___API_KEY="your-api-key"
```
# TODO: Populate with relevant params.
Key init args:
arg 1: type
description
arg 2: type
description
# TODO: Replace with relevant init params.
Instantiate:
```python
from __package_name__ import __ModuleName__Toolkit
toolkit = __ModuleName__Toolkit(
# ...
)
```
Tools:
```python
toolkit.get_tools()
```
```txt
# TODO: Example output.
```
Use within an agent:
```python
from langgraph.prebuilt import create_react_agent
agent_executor = create_react_agent(llm, tools)
example_query = "..."
events = agent_executor.stream(
{"messages": [("user", example_query)]},
stream_mode="values",
)
for event in events:
event["messages"][-1].pretty_print()
```
```txt
# TODO: Example output.
```
"""
# TODO: This method must be implemented to list tools.
def get_tools(self) -> List[BaseTool]:
raise NotImplementedError()
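# An illustrative sketch, assuming (hypothetically) that the toolkit's tools
# can be constructed with no required init args:
#
#     def get_tools(self) -> List[BaseTool]:
#         return [__ModuleName__Tool()]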

View File

@@ -0,0 +1,95 @@
"""__ModuleName__ tools."""
from typing import Type
from langchain_core.callbacks import (
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
class __ModuleName__ToolInput(BaseModel):
"""Input schema for __ModuleName__ tool.
This docstring is **not** part of what is sent to the model when performing tool
calling. The Field default values and descriptions **are** part of what is sent to
the model when performing tool calling.
"""
# TODO: Add input args and descriptions.
a: int = Field(..., description="first number to add")
b: int = Field(..., description="second number to add")
class __ModuleName__Tool(BaseTool): # type: ignore[override]
"""__ModuleName__ tool.
Setup:
# TODO: Replace with relevant packages, env vars.
Install `__package_name__` and set environment variable
`__MODULE_NAME___API_KEY`.
```bash
pip install -U __package_name__
export __MODULE_NAME___API_KEY="your-api-key"
```
Instantiation:
```python
tool = __ModuleName__Tool(
# TODO: init params
)
```
Invocation with args:
```python
# TODO: invoke args
tool.invoke({...})
```
```python
# TODO: output of invocation
```
Invocation with ToolCall:
```python
# TODO: invoke args
tool.invoke({"args": {...}, "id": "1", "name": tool.name, "type": "tool_call"})
```
```python
# TODO: output of invocation
```
""" # noqa: E501
# TODO: Set tool name and description
name: str = "TODO: Tool name"
"""The name that is passed to the model when performing tool calling."""
description: str = "TODO: Tool description."
"""The description that is passed to the model when performing tool calling."""
args_schema: Type[BaseModel] = __ModuleName__ToolInput
"""The schema that is passed to the model when performing tool calling."""
# TODO: Add any other init params for the tool.
# param1: str | None
# """param1 determines foobar"""
# TODO: Replace (a, b) with real tool arguments.
def _run(
self, a: int, b: int, *, run_manager: CallbackManagerForToolRun | None = None
) -> str:
return str(a + b + 80)
# TODO: Implement if tool has native async functionality, otherwise delete.
# async def _arun(
# self,
# a: int,
# b: int,
# *,
# run_manager: AsyncCallbackManagerForToolRun | None = None,
# ) -> str:
# ...
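To make the two docstring invocation styles concrete: with `_run` returning `str(a + b + 80)`, plain-args invocation returns the raw string, while a ToolCall dict yields a `ToolMessage` wrapping it. A sketch with hypothetical rendered names:

```python
from typing import Type

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field


class ParrotAddInput(BaseModel):
    a: int = Field(..., description="first number to add")
    b: int = Field(..., description="second number to add")


class ParrotAddTool(BaseTool):  # hypothetical rendered name
    name: str = "parrot_add"
    description: str = "Add two numbers, parrot style."
    args_schema: Type[BaseModel] = ParrotAddInput

    def _run(
        self, a: int, b: int, *, run_manager: CallbackManagerForToolRun | None = None
    ) -> str:
        return str(a + b + 80)


tool = ParrotAddTool()
print(tool.invoke({"a": 2, "b": 3}))  # '85'
# With a ToolCall dict, the same result comes back as a ToolMessage:
print(tool.invoke({"args": {"a": 2, "b": 3}, "id": "1", "name": tool.name, "type": "tool_call"}))
```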

View File

@@ -0,0 +1,438 @@
"""__ModuleName__ vector stores."""
from __future__ import annotations
import uuid
from typing import (
Any,
Callable,
Iterator,
List,
Sequence,
Tuple,
Type,
TypeVar,
)
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.vectorstores import VectorStore
from langchain_core.vectorstores.utils import _cosine_similarity as cosine_similarity
VST = TypeVar("VST", bound=VectorStore)
class __ModuleName__VectorStore(VectorStore):
# TODO: Replace all TODOs in docstring.
"""__ModuleName__ vector store integration.
# TODO: Replace with relevant packages, env vars.
Setup:
Install `__package_name__` and set environment variable `__MODULE_NAME___API_KEY`.
```bash
pip install -U __package_name__
export __MODULE_NAME___API_KEY="your-api-key"
```
# TODO: Populate with relevant params.
Key init args — indexing params:
collection_name:
Name of the collection.
embedding_function:
Embedding function to use.
# TODO: Populate with relevant params.
Key init args — client params:
client:
Client to use.
connection_args:
Connection arguments.
# TODO: Replace with relevant init params.
Instantiate:
```python
from __module_name__.vectorstores import __ModuleName__VectorStore
from langchain_openai import OpenAIEmbeddings
vector_store = __ModuleName__VectorStore(
collection_name="foo",
embedding_function=OpenAIEmbeddings(),
connection_args={"uri": "./foo.db"},
# other params...
)
```
# TODO: Populate with relevant variables.
Add Documents:
```python
from langchain_core.documents import Document
document_1 = Document(page_content="foo", metadata={"baz": "bar"})
document_2 = Document(page_content="thud", metadata={"bar": "baz"})
document_3 = Document(page_content="i will be deleted :(")
documents = [document_1, document_2, document_3]
ids = ["1", "2", "3"]
vector_store.add_documents(documents=documents, ids=ids)
```
# TODO: Populate with relevant variables.
Delete Documents:
```python
vector_store.delete(ids=["3"])
```
# TODO: Fill out with relevant variables and example output.
Search:
```python
results = vector_store.similarity_search(query="thud", k=1)
for doc in results:
print(f"* {doc.page_content} [{doc.metadata}]")
```
```python
# TODO: Example output
```
# TODO: Fill out with relevant variables and example output.
Search with filter:
```python
results = vector_store.similarity_search(query="thud", k=1, filter={"bar": "baz"})
for doc in results:
print(f"* {doc.page_content} [{doc.metadata}]")
```
```python
# TODO: Example output
```
# TODO: Fill out with relevant variables and example output.
Search with score:
```python
results = vector_store.similarity_search_with_score(query="qux", k=1)
for doc, score in results:
print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
```
```python
# TODO: Example output
```
# TODO: Fill out with relevant variables and example output.
Async:
```python
# add documents
# await vector_store.aadd_documents(documents=documents, ids=ids)
# delete documents
# await vector_store.adelete(ids=["3"])
# search
# results = await vector_store.asimilarity_search(query="thud", k=1)
# search with score
results = await vector_store.asimilarity_search_with_score(query="qux", k=1)
for doc, score in results:
print(f"* [SIM={score:.3f}] {doc.page_content} [{doc.metadata}]")
```
```python
# TODO: Example output
```
# TODO: Fill out with relevant variables and example output.
Use as Retriever:
```python
retriever = vector_store.as_retriever(
search_type="mmr",
search_kwargs={"k": 1, "fetch_k": 2, "lambda_mult": 0.5},
)
retriever.invoke("thud")
```
```python
# TODO: Example output
```
""" # noqa: E501
def __init__(self, embedding: Embeddings) -> None:
"""Initialize with the given embedding function.
Args:
embedding: embedding function to use.
"""
self._database: dict[str, dict[str, Any]] = {}
self.embedding = embedding
@classmethod
def from_texts(
cls: Type[__ModuleName__VectorStore],
texts: List[str],
embedding: Embeddings,
metadatas: list[dict] | None = None,
**kwargs: Any,
) -> __ModuleName__VectorStore:
store = cls(
embedding=embedding,
)
store.add_texts(texts=texts, metadatas=metadatas, **kwargs)
return store
# optional: add custom async implementations
# @classmethod
# async def afrom_texts(
# cls: Type[VST],
# texts: List[str],
# embedding: Embeddings,
# metadatas: list[dict] | None = None,
# **kwargs: Any,
# ) -> VST:
# return await asyncio.get_running_loop().run_in_executor(
# None, partial(cls.from_texts, **kwargs), texts, embedding, metadatas
# )
@property
def embeddings(self) -> Embeddings:
return self.embedding
def add_documents(
self,
documents: List[Document],
ids: list[str] | None = None,
**kwargs: Any,
) -> List[str]:
"""Add documents to the store."""
texts = [doc.page_content for doc in documents]
vectors = self.embedding.embed_documents(texts)
if ids and len(ids) != len(texts):
msg = (
f"ids must be the same length as texts. "
f"Got {len(ids)} ids and {len(texts)} texts."
)
raise ValueError(msg)
id_iterator: Iterator[str | None] = (
iter(ids) if ids else iter(doc.id for doc in documents)
)
ids_ = []
for doc, vector in zip(documents, vectors):
doc_id = next(id_iterator)
doc_id_ = doc_id if doc_id else str(uuid.uuid4())
ids_.append(doc_id_)
self._database[doc_id_] = {
"id": doc_id_,
"vector": vector,
"text": doc.page_content,
"metadata": doc.metadata,
}
return ids_
# optional: add custom async implementations
# async def aadd_documents(
# self,
# documents: List[Document],
# ids: list[str] | None = None,
# **kwargs: Any,
# ) -> List[str]:
# raise NotImplementedError
def delete(self, ids: list[str] | None = None, **kwargs: Any) -> None:
if ids:
for _id in ids:
self._database.pop(_id, None)
# optional: add custom async implementations
# async def adelete(
# self, ids: list[str] | None = None, **kwargs: Any
# ) -> None:
# raise NotImplementedError
def get_by_ids(self, ids: Sequence[str], /) -> list[Document]:
"""Get documents by their ids.
Args:
ids: The ids of the documents to get.
Returns:
A list of Document objects.
"""
documents = []
for doc_id in ids:
doc = self._database.get(doc_id)
if doc:
documents.append(
Document(
id=doc["id"],
page_content=doc["text"],
metadata=doc["metadata"],
)
)
return documents
# optional: add custom async implementations
# async def aget_by_ids(self, ids: Sequence[str], /) -> list[Document]:
# raise NotImplementedError
# NOTE: the below helper method implements similarity search for in-memory
# storage. It is optional and not a part of the vector store interface.
def _similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Callable[[Document], bool] | None = None,
**kwargs: Any,
) -> List[tuple[Document, float, List[float]]]:
# get all docs with fixed order in list
docs = list(self._database.values())
if filter is not None:
docs = [
doc
for doc in docs
if filter(Document(page_content=doc["text"], metadata=doc["metadata"]))
]
if not docs:
return []
similarity = cosine_similarity([embedding], [doc["vector"] for doc in docs])[0]
# get the indices ordered by similarity score
top_k_idx = similarity.argsort()[::-1][:k]
return [
(
# Document
Document(
id=doc_dict["id"],
page_content=doc_dict["text"],
metadata=doc_dict["metadata"],
),
# Score
float(similarity[idx].item()),
# Embedding vector
doc_dict["vector"],
)
for idx in top_k_idx
# Assign using walrus operator to avoid multiple lookups
if (doc_dict := docs[idx])
]
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
embedding = self.embedding.embed_query(query)
return [
doc
for doc, _, _ in self._similarity_search_with_score_by_vector(
embedding=embedding, k=k, **kwargs
)
]
# optional: add custom async implementations
# async def asimilarity_search(
# self, query: str, k: int = 4, **kwargs: Any
# ) -> List[Document]:
# # This is a temporary workaround to make the similarity search
# # asynchronous. The proper solution is to make the similarity search
# # asynchronous in the vector store implementations.
# func = partial(self.similarity_search, query, k=k, **kwargs)
# return await asyncio.get_event_loop().run_in_executor(None, func)
def similarity_search_with_score(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Tuple[Document, float]]:
embedding = self.embedding.embed_query(query)
return [
(doc, similarity)
for doc, similarity, _ in self._similarity_search_with_score_by_vector(
embedding=embedding, k=k, **kwargs
)
]
# optional: add custom async implementations
# async def asimilarity_search_with_score(
# self, *args: Any, **kwargs: Any
# ) -> List[Tuple[Document, float]]:
# # This is a temporary workaround to make the similarity search
# # asynchronous. The proper solution is to make the similarity search
# # asynchronous in the vector store implementations.
# func = partial(self.similarity_search_with_score, *args, **kwargs)
# return await asyncio.get_event_loop().run_in_executor(None, func)
### ADDITIONAL OPTIONAL SEARCH METHODS BELOW ###
# def similarity_search_by_vector(
# self, embedding: List[float], k: int = 4, **kwargs: Any
# ) -> List[Document]:
# raise NotImplementedError
# optional: add custom async implementations
# async def asimilarity_search_by_vector(
# self, embedding: List[float], k: int = 4, **kwargs: Any
# ) -> List[Document]:
# # This is a temporary workaround to make the similarity search
# # asynchronous. The proper solution is to make the similarity search
# # asynchronous in the vector store implementations.
# func = partial(self.similarity_search_by_vector, embedding, k=k, **kwargs)
# return await asyncio.get_event_loop().run_in_executor(None, func)
# def max_marginal_relevance_search(
# self,
# query: str,
# k: int = 4,
# fetch_k: int = 20,
# lambda_mult: float = 0.5,
# **kwargs: Any,
# ) -> List[Document]:
# raise NotImplementedError
# optional: add custom async implementations
# async def amax_marginal_relevance_search(
# self,
# query: str,
# k: int = 4,
# fetch_k: int = 20,
# lambda_mult: float = 0.5,
# **kwargs: Any,
# ) -> List[Document]:
# # This is a temporary workaround to make the similarity search
# # asynchronous. The proper solution is to make the similarity search
# # asynchronous in the vector store implementations.
# func = partial(
# self.max_marginal_relevance_search,
# query,
# k=k,
# fetch_k=fetch_k,
# lambda_mult=lambda_mult,
# **kwargs,
# )
# return await asyncio.get_event_loop().run_in_executor(None, func)
# def max_marginal_relevance_search_by_vector(
# self,
# embedding: List[float],
# k: int = 4,
# fetch_k: int = 20,
# lambda_mult: float = 0.5,
# **kwargs: Any,
# ) -> List[Document]:
# raise NotImplementedError
# optional: add custom async implementations
# async def amax_marginal_relevance_search_by_vector(
# self,
# embedding: List[float],
# k: int = 4,
# fetch_k: int = 20,
# lambda_mult: float = 0.5,
# **kwargs: Any,
# ) -> List[Document]:
# raise NotImplementedError
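A quick round-trip through the in-memory reference implementation above, sketched with `DeterministicFakeEmbedding` from `langchain_core` so it runs without a provider key (the rendered package import is hypothetical):

```python
from langchain_core.documents import Document
from langchain_core.embeddings import DeterministicFakeEmbedding

# Hypothetical rendered package name:
from langchain_parrot_link.vectorstores import ParrotLinkVectorStore

store = ParrotLinkVectorStore(embedding=DeterministicFakeEmbedding(size=16))
ids = store.add_documents(
    [
        Document(page_content="foo", metadata={"tag": "a"}),
        Document(page_content="bar", metadata={"tag": "b"}),
    ]
)
print(store.similarity_search("foo", k=1)[0].page_content)  # 'foo'
store.delete(ids=[ids[0]])
print([d.page_content for d in store.get_by_ids(ids)])  # ['bar']
```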

View File

@@ -0,0 +1,53 @@
[build-system]
requires = ["pdm-backend"]
build-backend = "pdm.backend"
[project]
name = "__package_name__"
version = "0.1.0"
description = "An integration package connecting __ModuleName__ and LangChain"
authors = []
readme = "README.md"
license = "MIT"
requires-python = ">=3.10.0,<4.0.0"
dependencies = [
"langchain-core>=0.3.15",
]
[project.urls]
"Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/partners/__package_name_short__"
"Release Notes" = "https://github.com/langchain-ai/langchain/releases?q=tag%3A%22__package_name_short__%3D%3D0%22&expanded=true"
"Repository" = "https://github.com/langchain-ai/langchain"
[tool.mypy]
disallow_untyped_defs = "True"
[tool.uv]
dev-dependencies = [
"pytest>=7.4.3",
"pytest-asyncio>=0.23.2",
"pytest-socket>=0.7.0",
"pytest-watcher>=0.3.4",
"langchain-tests>=0.3.5",
"ruff>=0.5",
"mypy>=1.10",
]
[tool.ruff.lint]
select = ["E", "F", "I", "T201"]
[tool.ruff.lint.flake8-tidy-imports]
ban-relative-imports = "all"
[tool.ruff.lint.per-file-ignores]
"docs/**" = [ "ALL",]
[tool.coverage.run]
omit = ["tests/*"]
[tool.pytest.ini_options]
addopts = "--strict-markers --strict-config --durations=5"
markers = [
"compile: mark placeholder test used to compile integration tests without running them",
]
asyncio_mode = "auto"

View File

@@ -0,0 +1,17 @@
import sys
import traceback
from importlib.machinery import SourceFileLoader
if __name__ == "__main__":
files = sys.argv[1:]
has_failure = False
for file in files:
try:
SourceFileLoader("x", file).load_module()
except Exception:
has_failure = True
print(file) # noqa: T201
traceback.print_exc()
print() # noqa: T201
sys.exit(1 if has_failure else 0)
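`load_module()` has been deprecated since Python 3.4; should this script ever be modernized, a sketch of the `importlib.util` equivalent (same observable behavior for this check):

```python
import importlib.util


def load(path: str) -> None:
    """Import the module at `path`, raising on any import-time error."""
    spec = importlib.util.spec_from_file_location("x", path)
    assert spec is not None and spec.loader is not None
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
```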

View File

@@ -0,0 +1,18 @@
#!/bin/bash
set -eu
# Initialize a variable to keep track of errors
errors=0
# make sure not importing from langchain, langchain_experimental, or langchain_community
git --no-pager grep '^from langchain\.' . && errors=$((errors+1))
git --no-pager grep '^from langchain_experimental\.' . && errors=$((errors+1))
git --no-pager grep '^from langchain_community\.' . && errors=$((errors+1))
# Decide on an exit status based on the errors
if [ "$errors" -gt 0 ]; then
exit 1
else
exit 0
fi
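A rough Python equivalent of the same check, for environments where running it through git is inconvenient (hypothetical, not part of the template):

```python
import pathlib
import re
import sys

# Flag any file importing from the legacy top-level packages.
BANNED = re.compile(
    r"^from (langchain|langchain_experimental|langchain_community)\.", re.MULTILINE
)

bad = [
    str(p)
    for p in pathlib.Path(".").rglob("*.py")
    if BANNED.search(p.read_text(encoding="utf-8"))
]
print("\n".join(bad))
sys.exit(1 if bad else 0)
```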

View File

@@ -0,0 +1,21 @@
"""Test Chat__ModuleName__ chat model."""
from typing import Type
from __module_name__.chat_models import Chat__ModuleName__
from langchain_tests.integration_tests import ChatModelIntegrationTests
class TestChat__ModuleName__Integration(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[Chat__ModuleName__]:
return Chat__ModuleName__
@property
def chat_model_params(self) -> dict:
# These should be parameters used to initialize your integration for testing
return {
"model": "bird-brain-001",
"temperature": 0,
"parrot_buffer_length": 50,
}
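The standard-test suites are driven by capability flags; a rendered subclass can opt into optional checks by overriding the corresponding properties. A sketch, assuming the flags documented for `langchain-tests` (e.g. `supports_image_inputs`) and a hypothetical rendered package:

```python
from langchain_parrot_link.chat_models import ChatParrotLink  # hypothetical package

from langchain_tests.integration_tests import ChatModelIntegrationTests


class TestChatParrotLinkIntegration(ChatModelIntegrationTests):
    @property
    def chat_model_class(self) -> type[ChatParrotLink]:
        return ChatParrotLink

    @property
    def chat_model_params(self) -> dict:
        return {"model": "bird-brain-001", "temperature": 0}

    # Capability flags gate optional standard tests; a vision-capable model
    # would opt in like this (the default is False):
    @property
    def supports_image_inputs(self) -> bool:
        return True
```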

View File

@@ -0,0 +1,7 @@
import pytest
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
pass

View File

@@ -0,0 +1,16 @@
"""Test __ModuleName__ embeddings."""
from typing import Type
from __module_name__.embeddings import __ModuleName__Embeddings
from langchain_tests.integration_tests import EmbeddingsIntegrationTests
class TestParrotLinkEmbeddingsIntegration(EmbeddingsIntegrationTests):
@property
def embeddings_class(self) -> Type[__ModuleName__Embeddings]:
return __ModuleName__Embeddings
@property
def embedding_model_params(self) -> dict:
return {"model": "nest-embed-001"}

View File

@@ -0,0 +1,22 @@
from typing import Type
from __module_name__.retrievers import __ModuleName__Retriever
from langchain_tests.integration_tests import (
RetrieversIntegrationTests,
)
class Test__ModuleName__Retriever(RetrieversIntegrationTests):
@property
def retriever_constructor(self) -> Type[__ModuleName__Retriever]:
"""Get an empty vectorstore for unit tests."""
return __ModuleName__Retriever
@property
def retriever_constructor_params(self) -> dict:
return {"k": 2}
@property
def retriever_query_example(self) -> str:
"""Returns a str representing the "query" of an example retriever call."""
return "example query"

View File

@@ -0,0 +1,27 @@
from typing import Type
from __module_name__.tools import __ModuleName__Tool
from langchain_tests.integration_tests import ToolsIntegrationTests
class TestParrotMultiplyToolIntegration(ToolsIntegrationTests):
@property
def tool_constructor(self) -> Type[__ModuleName__Tool]:
return __ModuleName__Tool
@property
def tool_constructor_params(self) -> dict:
# if your tool constructor instead required initialization arguments like
# `def __init__(self, some_arg: int):`, you would return those here
# as a dictionary, e.g.: `return {'some_arg': 42}`
return {}
@property
def tool_invoke_params_example(self) -> dict:
"""
Returns a dictionary representing the "args" of an example tool call.
This should NOT be a ToolCall dict - i.e. it should not have
`{"name", "id", "args"}` keys.
"""
return {"a": 2, "b": 3}

View File

@@ -0,0 +1,20 @@
from typing import Generator
import pytest
from __module_name__.vectorstores import __ModuleName__VectorStore
from langchain_core.vectorstores import VectorStore
from langchain_tests.integration_tests import VectorStoreIntegrationTests
class Test__ModuleName__VectorStore(VectorStoreIntegrationTests):
@pytest.fixture()
def vectorstore(self) -> Generator[VectorStore, None, None]: # type: ignore
"""Get an empty vectorstore for unit tests."""
store = __ModuleName__VectorStore(self.get_embeddings())
# note: store should be EMPTY at this point
# if you need to delete data, you may do so here
try:
yield store
finally:
# cleanup operations, or deleting data
pass

View File

@@ -0,0 +1,21 @@
"""Test chat model integration."""
from typing import Type
from __module_name__.chat_models import Chat__ModuleName__
from langchain_tests.unit_tests import ChatModelUnitTests
class TestChat__ModuleName__Unit(ChatModelUnitTests):
@property
def chat_model_class(self) -> Type[Chat__ModuleName__]:
return Chat__ModuleName__
@property
def chat_model_params(self) -> dict:
# These should be parameters used to initialize your integration for testing
return {
"model": "bird-brain-001",
"temperature": 0,
"parrot_buffer_length": 50,
}

View File

@@ -0,0 +1,16 @@
"""Test embedding model integration."""
from typing import Type
from __module_name__.embeddings import __ModuleName__Embeddings
from langchain_tests.unit_tests import EmbeddingsUnitTests
class TestParrotLinkEmbeddingsUnit(EmbeddingsUnitTests):
@property
def embeddings_class(self) -> Type[__ModuleName__Embeddings]:
return __ModuleName__Embeddings
@property
def embedding_model_params(self) -> dict:
return {"model": "nest-embed-001"}

View File

@@ -0,0 +1,27 @@
from typing import Type
from __module_name__.tools import __ModuleName__Tool
from langchain_tests.unit_tests import ToolsUnitTests
class TestParrotMultiplyToolUnit(ToolsUnitTests):
@property
def tool_constructor(self) -> Type[__ModuleName__Tool]:
return __ModuleName__Tool
@property
def tool_constructor_params(self) -> dict:
# If your tool constructor instead required initialization arguments like
# `def __init__(self, some_arg: int):`, you would return those here
# as a dictionary, e.g.: `return {'some_arg': 42}`
return {}
@property
def tool_invoke_params_example(self) -> dict:
"""
Returns a dictionary representing the "args" of an example tool call.
This should NOT be a ToolCall dict - i.e. it should not have
`{"name", "id", "args"}` keys.
"""
return {"a": 2, "b": 3}

View File

@@ -0,0 +1 @@
"""Namespaces."""

View File

@@ -0,0 +1,371 @@
"""Manage LangChain apps."""
import shutil
import subprocess
import sys
import warnings
from pathlib import Path
from typing import Annotated
import typer
import uvicorn
from langchain_cli.utils.events import create_events
from langchain_cli.utils.git import (
DependencySource,
copy_repo,
parse_dependencies,
update_repo,
)
from langchain_cli.utils.packages import (
LangServeExport,
get_langserve_export,
get_package_root,
)
from langchain_cli.utils.pyproject import (
add_dependencies_to_pyproject_toml,
remove_dependencies_from_pyproject_toml,
)
REPO_DIR = Path(typer.get_app_dir("langchain")) / "git_repos"
app_cli = typer.Typer(no_args_is_help=True, add_completion=False)
@app_cli.command()
def new(
name: Annotated[
str | None,
typer.Argument(
help="The name of the folder to create",
),
] = None,
*,
package: Annotated[
list[str] | None,
typer.Option(help="Packages to seed the project with"),
] = None,
pip: Annotated[
bool | None,
typer.Option(
"--pip/--no-pip",
help="Pip install the template(s) as editable dependencies",
),
] = None,
noninteractive: Annotated[
bool,
typer.Option(
"--non-interactive/--interactive",
help="Don't prompt for any input",
),
] = False,
) -> None:
"""Create a new LangServe application."""
has_packages = package is not None and len(package) > 0
if noninteractive:
if name is None:
msg = "name is required when --non-interactive is set"
raise typer.BadParameter(msg)
name_str = name
pip_bool = bool(pip) # None should be false
else:
name_str = name or typer.prompt("What folder would you like to create?")
if not has_packages:
package = []
package_prompt = "What package would you like to add? (leave blank to skip)"
while True:
package_str = typer.prompt(
package_prompt,
default="",
show_default=False,
)
if not package_str:
break
package.append(package_str)
package_prompt = (
f"{len(package)} added. Any more packages (leave blank to end)?"
)
has_packages = len(package) > 0
pip_bool = False
if pip is None and has_packages:
pip_bool = typer.confirm(
"Would you like to install these templates into your environment "
"with pip?",
default=False,
)
# copy over template from ../project_template
project_template_dir = Path(__file__).parents[1] / "project_template"
destination_dir = Path.cwd() / name_str if name_str != "." else Path.cwd()
app_name = name_str if name_str != "." else Path.cwd().name
shutil.copytree(project_template_dir, destination_dir, dirs_exist_ok=name_str == ".")
readme = destination_dir / "README.md"
readme_contents = readme.read_text()
readme.write_text(readme_contents.replace("__app_name__", app_name))
pyproject = destination_dir / "pyproject.toml"
pyproject_contents = pyproject.read_text()
pyproject.write_text(pyproject_contents.replace("__app_name__", app_name))
# add packages if specified
if has_packages:
add(package, project_dir=destination_dir, pip=pip_bool)
typer.echo(f'\n\nSuccess! Created a new LangChain app under "./{app_name}"!\n\n')
typer.echo("Next, enter your new app directory by running:\n")
typer.echo(f" cd ./{app_name}\n")
typer.echo("Then add templates with commands like:\n")
typer.echo(" langchain app add extraction-openai-functions")
typer.echo(
" langchain app add git+ssh://git@github.com/efriis/simple-pirate.git\n\n",
)
@app_cli.command()
def add(
dependencies: Annotated[
list[str] | None,
typer.Argument(help="The dependency to add"),
] = None,
*,
api_path: Annotated[
list[str] | None,
typer.Option(help="API paths to add"),
] = None,
project_dir: Annotated[
Path | None,
typer.Option(help="The project directory"),
] = None,
repo: Annotated[
list[str] | None,
typer.Option(help="Install templates from a specific github repo instead"),
] = None,
branch: Annotated[
list[str] | None,
typer.Option(help="Install templates from a specific branch"),
] = None,
pip: Annotated[
bool,
typer.Option(
"--pip/--no-pip",
help="Pip install the template(s) as editable dependencies",
prompt="Would you like to `pip install -e` the template(s)?",
),
],
) -> None:
"""Add the specified template to the current LangServe app.
e.g.:
`langchain app add extraction-openai-functions`
`langchain app add git+ssh://git@github.com/efriis/simple-pirate.git`
"""
if branch is None:
branch = []
if repo is None:
repo = []
if api_path is None:
api_path = []
if not branch and not repo:
warnings.warn(
"Adding templates from the default branch and repo is deprecated."
" At a minimum, you will have to add `--branch v0.2` for this to work",
stacklevel=2,
)
parsed_deps = parse_dependencies(dependencies, repo, branch, api_path)
project_root = get_package_root(project_dir)
package_dir = project_root / "packages"
create_events(
[{"event": "serve add", "properties": {"parsed_dep": d}} for d in parsed_deps],
)
# group by repo/ref
grouped: dict[tuple[str, str | None], list[DependencySource]] = {}
for dep in parsed_deps:
key_tup = (dep["git"], dep["ref"])
lst = grouped.get(key_tup, [])
lst.append(dep)
grouped[key_tup] = lst
installed_destination_paths: list[Path] = []
installed_destination_names: list[str] = []
installed_exports: list[LangServeExport] = []
for (git, ref), group_deps in grouped.items():
if len(group_deps) == 1:
typer.echo(f"Adding {git}@{ref}...")
else:
typer.echo(f"Adding {len(group_deps)} templates from {git}@{ref}")
source_repo_path = update_repo(git, ref, REPO_DIR)
for dep in group_deps:
source_path = (
source_repo_path / dep["subdirectory"]
if dep["subdirectory"]
else source_repo_path
)
pyproject_path = source_path / "pyproject.toml"
if not pyproject_path.exists():
typer.echo(f"Could not find {pyproject_path}")
continue
langserve_export = get_langserve_export(pyproject_path)
# default path to package_name
inner_api_path = dep["api_path"] or langserve_export["package_name"]
destination_path = package_dir / inner_api_path
if destination_path.exists():
typer.echo(
f"Folder {inner_api_path} already exists. Skipping...",
)
continue
copy_repo(source_path, destination_path)
typer.echo(f" - Downloaded {dep['subdirectory']} to {inner_api_path}")
installed_destination_paths.append(destination_path)
installed_destination_names.append(inner_api_path)
installed_exports.append(langserve_export)
if len(installed_destination_paths) == 0:
typer.echo("No packages installed. Exiting.")
return
try:
add_dependencies_to_pyproject_toml(
project_root / "pyproject.toml",
zip(installed_destination_names, installed_destination_paths, strict=False),
)
except Exception:
# Can fail if user modified/removed pyproject.toml
typer.echo("Failed to add dependencies to pyproject.toml, continuing...")
try:
cwd = Path.cwd()
installed_destination_strs = [
str(p.relative_to(cwd)) for p in installed_destination_paths
]
except ValueError:
# Can fail if the cwd is not a parent of the package
typer.echo("Failed to print install command, continuing...")
else:
if pip:
cmd = ["pip", "install", "-e", *installed_destination_strs]
cmd_str = " \\\n ".join(installed_destination_strs)
typer.echo(f"Running: pip install -e \\\n {cmd_str}")
subprocess.run(cmd, cwd=cwd, check=True) # noqa: S603
chain_names = []
for e in installed_exports:
original_candidate = f"{e['package_name'].replace('-', '_')}_chain"
candidate = original_candidate
i = 2
while candidate in chain_names:
candidate = original_candidate + "_" + str(i)
i += 1
chain_names.append(candidate)
api_paths = [
str(Path("/") / path.relative_to(package_dir))
for path in installed_destination_paths
]
imports = [
f"from {e['module']} import {e['attr']} as {name}"
for e, name in zip(installed_exports, chain_names, strict=False)
]
routes = [
f'add_routes(app, {name}, path="{path}")'
for name, path in zip(chain_names, api_paths, strict=False)
]
t = (
"this template"
if len(chain_names) == 1
else f"these {len(chain_names)} templates"
)
lines = [
"",
f"To use {t}, add the following to your app:\n\n```",
"",
*imports,
"",
*routes,
"```",
]
typer.echo("\n".join(lines))
@app_cli.command()
def remove(
api_paths: Annotated[list[str], typer.Argument(help="The API paths to remove")],
*,
project_dir: Annotated[
Path | None,
typer.Option(help="The project directory"),
] = None,
) -> None:
"""Remove the specified package from the current LangServe app."""
project_root = get_package_root(project_dir)
project_pyproject = project_root / "pyproject.toml"
package_root = project_root / "packages"
remove_deps: list[str] = []
for api_path in api_paths:
package_dir = package_root / api_path
if not package_dir.exists():
typer.echo(f"Package {api_path} does not exist. Skipping...")
continue
try:
pyproject = package_dir / "pyproject.toml"
langserve_export = get_langserve_export(pyproject)
typer.echo(f"Removing {langserve_export['package_name']}...")
shutil.rmtree(package_dir)
remove_deps.append(api_path)
except OSError as exc:
typer.echo(f"Failed to remove {api_path}: {exc}")
try:
remove_dependencies_from_pyproject_toml(project_pyproject, remove_deps)
except Exception:
# Can fail if user modified/removed pyproject.toml
typer.echo("Failed to remove dependencies from pyproject.toml.")
@app_cli.command()
def serve(
*,
port: Annotated[
int | None,
typer.Option(help="The port to run the server on"),
] = None,
host: Annotated[
str | None,
typer.Option(help="The host to run the server on"),
] = None,
app: Annotated[
str | None,
typer.Option(help="The app to run, e.g. `app.server:app`"),
] = None,
) -> None:
"""Start the LangServe app."""
# add current dir to sys.path so the app module (e.g. `app.server`) resolves
sys.path.append(str(Path.cwd()))
app_str = app if app is not None else "app.server:app"
host_str = host if host is not None else "127.0.0.1"
uvicorn.run(
app_str,
host=host_str,
port=port if port is not None else 8000,
reload=True,
)
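Because these are plain Typer commands, they can be exercised in-process with Typer's test runner; a sketch (run from a scratch directory, since `new` writes into the cwd):

```python
from typer.testing import CliRunner

from langchain_cli.namespaces.app import app_cli

runner = CliRunner()
# Scaffold a project non-interactively; exit code 0 means the template copied.
result = runner.invoke(app_cli, ["new", "my-app", "--non-interactive"])
print(result.exit_code, result.output.splitlines()[:2])
```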

View File

@@ -0,0 +1,260 @@
"""Develop integration packages for LangChain."""
import os
import re
import shutil
import subprocess
from pathlib import Path
from typing import Annotated, cast
import typer
from typing_extensions import TypedDict
from langchain_cli.utils.find_replace import replace_file, replace_glob
integration_cli = typer.Typer(no_args_is_help=True, add_completion=False)
class Replacements(TypedDict):
"""Replacements."""
__package_name__: str
__module_name__: str
__ModuleName__: str
__MODULE_NAME__: str
__package_name_short__: str
__package_name_short_snake__: str
def _process_name(name: str, *, community: bool = False) -> Replacements:
preprocessed = name.replace("_", "-").lower()
preprocessed = preprocessed.removeprefix("langchain-")
if not re.match(r"^[a-z][a-z0-9-]*$", preprocessed):
msg = (
"Name should only contain lowercase letters (a-z), numbers, and hyphens"
", and start with a letter."
)
raise ValueError(msg)
if preprocessed.endswith("-"):
msg = "Name should not end with `-`."
raise ValueError(msg)
if preprocessed.find("--") != -1:
msg = "Name should not contain consecutive hyphens."
raise ValueError(msg)
replacements: Replacements = {
"__package_name__": f"langchain-{preprocessed}",
"__module_name__": "langchain_" + preprocessed.replace("-", "_"),
"__ModuleName__": preprocessed.title().replace("-", ""),
"__MODULE_NAME__": preprocessed.upper().replace("-", ""),
"__package_name_short__": preprocessed,
"__package_name_short_snake__": preprocessed.replace("-", "_"),
}
if community:
replacements["__module_name__"] = preprocessed.replace("-", "_")
return replacements
@integration_cli.command()
def new(
name: Annotated[
str,
typer.Option(
help="The name of the integration to create (e.g. `my-integration`)",
prompt="The name of the integration to create (e.g. `my-integration`)",
),
],
name_class: Annotated[
str | None,
typer.Option(
help="The name of the integration in PascalCase. e.g. `MyIntegration`."
" This is used to name classes like `MyIntegrationVectorStore`",
),
] = None,
src: Annotated[
list[str] | None,
typer.Option(
help="The name of the single template file to copy."
" e.g. `--src integration_template/chat_models.py "
"--dst my_integration/chat_models.py`. Can be used multiple times.",
),
] = None,
dst: Annotated[
list[str] | None,
typer.Option(
help="The relative path to the integration package to place the new file in"
". e.g. `my-integration/my_integration.py`",
),
] = None,
) -> None:
"""Create a new integration package."""
try:
replacements = _process_name(name)
except ValueError as e:
typer.echo(e)
raise typer.Exit(code=1) from None
if name_class:
if not re.match(r"^[A-Z][a-zA-Z0-9]*$", name_class):
typer.echo(
"Name should only contain letters (a-z, A-Z), numbers, and underscores"
", and start with a capital letter.",
)
raise typer.Exit(code=1)
replacements["__ModuleName__"] = name_class
else:
replacements["__ModuleName__"] = typer.prompt(
"Name of integration in PascalCase",
default=replacements["__ModuleName__"],
)
project_template_dir = Path(__file__).parents[1] / "integration_template"
destination_dir = Path.cwd() / replacements["__package_name__"]
if not src and not dst:
if destination_dir.exists():
typer.echo(f"Folder {destination_dir} exists.")
raise typer.Exit(code=1)
# Copy over template from ../integration_template
shutil.copytree(project_template_dir, destination_dir, dirs_exist_ok=False)
# Folder movement
package_dir = destination_dir / replacements["__module_name__"]
shutil.move(destination_dir / "integration_template", package_dir)
# Replacements in files
replace_glob(destination_dir, "**/*", cast("dict[str, str]", replacements))
# Dependency install
try:
# Use --no-progress to avoid tty issues in CI/test environments
env = os.environ.copy()
env.pop("UV_FROZEN", None)
env.pop("VIRTUAL_ENV", None)
subprocess.run(
["uv", "sync", "--dev", "--no-progress"], # noqa: S607
cwd=destination_dir,
check=True,
env=env,
)
except FileNotFoundError:
typer.echo(
"uv is not installed. Skipping dependency installation; run "
"`uv sync --dev` manually if needed.",
)
except subprocess.CalledProcessError:
typer.echo(
"Failed to install dependencies. You may need to run "
"`uv sync --dev` manually in the package directory.",
)
else:
# Confirm src and dst are the same length
if not src:
typer.echo("Cannot provide --dst without --src.")
raise typer.Exit(code=1)
src_paths = [project_template_dir / p for p in src]
if dst and len(src) != len(dst):
typer.echo("Number of --src and --dst arguments must match.")
raise typer.Exit(code=1)
if not dst:
# Assume we're in a package dir, copy to equivalent path
dst_paths = [destination_dir / p for p in src]
else:
dst_paths = [Path.cwd() / p for p in dst]
dst_paths = [
p / f"{replacements['__package_name_short_snake__']}.ipynb"
if not p.suffix
else p
for p in dst_paths
]
# Confirm no duplicate dst_paths
if len(dst_paths) != len(set(dst_paths)):
typer.echo(
"Duplicate destination paths provided or computed - please "
"specify them explicitly with --dst.",
)
raise typer.Exit(code=1)
# Confirm no files exist at dst_paths
for dst_path in dst_paths:
if dst_path.exists():
typer.echo(f"File {dst_path} exists.")
raise typer.Exit(code=1)
for src_path, dst_path in zip(src_paths, dst_paths, strict=False):
shutil.copy(src_path, dst_path)
replace_file(dst_path, cast("dict[str, str]", replacements))
TEMPLATE_MAP: dict[str, str] = {
"ChatModel": "chat.ipynb",
"DocumentLoader": "document_loaders.ipynb",
"Tool": "tools.ipynb",
"VectorStore": "vectorstores.ipynb",
"Embeddings": "text_embedding.ipynb",
"ByteStore": "kv_store.ipynb",
"LLM": "llms.ipynb",
"Provider": "provider.ipynb",
"Toolkit": "toolkits.ipynb",
"Retriever": "retrievers.ipynb",
}
_component_types_str = ", ".join(f"`{k}`" for k in TEMPLATE_MAP)
@integration_cli.command()
def create_doc(
name: Annotated[
str,
typer.Option(
help=(
"The kebab-case name of the integration (e.g. `openai`, "
"`google-vertexai`). Do not include a 'langchain-' prefix."
),
prompt=(
"The kebab-case name of the integration (e.g. `openai`, "
"`google-vertexai`). Do not include a 'langchain-' prefix."
),
),
],
name_class: Annotated[
str | None,
typer.Option(
help=(
"The PascalCase name of the integration (e.g. `OpenAI`, "
"`VertexAI`). Do not include a 'Chat', 'VectorStore', etc. "
"prefix/suffix."
),
),
] = None,
component_type: Annotated[
str,
typer.Option(
help=(
f"The type of component. Currently supported: {_component_types_str}."
),
),
] = "ChatModel",
destination_dir: Annotated[
str,
typer.Option(
help="The relative path to the docs directory to place the new file in.",
prompt="The relative path to the docs directory to place the new file in.",
),
] = "docs/docs/integrations/chat/",
) -> None:
"""Create a new integration doc."""
if component_type not in TEMPLATE_MAP:
typer.echo(
f"Unrecognized {component_type=}. Expected one of {_component_types_str}.",
)
raise typer.Exit(code=1)
new(
name=name,
name_class=name_class,
src=[f"docs/{TEMPLATE_MAP[component_type]}"],
dst=[destination_dir],
)
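A worked example of the replacement table `_process_name` builds, which both commands above rely on (values traced from the logic, not from running the CLI):

```python
from langchain_cli.namespaces.integration import _process_name

print(_process_name("My-Integration"))
# {
#     "__package_name__": "langchain-my-integration",
#     "__module_name__": "langchain_my_integration",
#     "__ModuleName__": "MyIntegration",
#     "__MODULE_NAME__": "MYINTEGRATION",
#     "__package_name_short__": "my-integration",
#     "__package_name_short_snake__": "my_integration",
# }
```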

View File

@@ -0,0 +1,2 @@
.gritmodules*
*.log

View File

@@ -0,0 +1,3 @@
version: 0.0.1
patterns:
- name: github.com/getgrit/stdlib#*

View File

@@ -0,0 +1,56 @@
# Testing the replace_imports migration
This runs the v0.2 migration with a desired set of rules.
```grit
language python
langchain_all_migrations()
```
## Single import
Before:
```python
from langchain.chat_models import ChatOpenAI
```
After:
```python
from langchain_community.chat_models import ChatOpenAI
```
## Community to partner
Before:
```python
from langchain_community.chat_models import ChatOpenAI
```
After:
```python
from langchain_openai import ChatOpenAI
```
## Noop
This file should not match at all.
```python
from foo import ChatOpenAI
```
## Mixed imports
Before:
```python
from langchain_community.chat_models import ChatOpenAI, ChatAnthropic, foo
```
After:
```python
from langchain_community.chat_models import foo
from langchain_openai import ChatOpenAI
from langchain_anthropic import ChatAnthropic
```
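Under the hood these migrations are (module, name) → (module, name) lookups; a hypothetical pure-Python sketch of a single rule, only to make the shape of the rule tables below concrete:

```python
import re

# One (old_module, name) -> (new_module, new_name) row from the rule tables.
RULES = {
    ("langchain_community.chat_models", "ChatOpenAI"): ("langchain_openai", "ChatOpenAI"),
}


def rewrite(line: str) -> str:
    match = re.fullmatch(r"from (\S+) import (\w+)", line.strip())
    if not match:
        return line  # the real tool also splits mixed imports like the case above
    target = RULES.get((match.group(1), match.group(2)))
    return f"from {target[0]} import {target[1]}" if target else line


print(rewrite("from langchain_community.chat_models import ChatOpenAI"))
# -> from langchain_openai import ChatOpenAI
```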

View File

@@ -0,0 +1,15 @@
language python
// This migration is generated automatically - do not manually edit this file
pattern langchain_migrate_anthropic() {
find_replace_imports(list=[
[`langchain_community.chat_models.anthropic`, `ChatAnthropic`, `langchain_anthropic`, `ChatAnthropic`],
[`langchain_community.llms.anthropic`, `Anthropic`, `langchain_anthropic`, `Anthropic`],
[`langchain_community.chat_models`, `ChatAnthropic`, `langchain_anthropic`, `ChatAnthropic`],
[`langchain_community.llms`, `Anthropic`, `langchain_anthropic`, `Anthropic`]
])
}
// Add this for invoking directly
langchain_migrate_anthropic()

View File

@@ -0,0 +1,67 @@
language python
// This migration is generated automatically - do not manually edit this file
pattern langchain_migrate_astradb() {
find_replace_imports(list=[
[
`langchain_community.vectorstores.astradb`,
`AstraDB`,
`langchain_astradb`,
`AstraDBVectorStore`
]
,
[
`langchain_community.storage.astradb`,
`AstraDBByteStore`,
`langchain_astradb`,
`AstraDBByteStore`
]
,
[
`langchain_community.storage.astradb`,
`AstraDBStore`,
`langchain_astradb`,
`AstraDBStore`
]
,
[
`langchain_community.cache`,
`AstraDBCache`,
`langchain_astradb`,
`AstraDBCache`
]
,
[
`langchain_community.cache`,
`AstraDBSemanticCache`,
`langchain_astradb`,
`AstraDBSemanticCache`
]
,
[
`langchain_community.chat_message_histories.astradb`,
`AstraDBChatMessageHistory`,
`langchain_astradb`,
`AstraDBChatMessageHistory`
]
,
[
`langchain_community.document_loaders.astradb`,
`AstraDBLoader`,
`langchain_astradb`,
`AstraDBLoader`
]
])
}
// Add this for invoking directly
langchain_migrate_astradb()

View File

@@ -0,0 +1,38 @@
language python
// This migration is generated automatically - do not manually edit this file
pattern langchain_migrate_community_to_core() {
find_replace_imports(list=[
[`langchain_community.callbacks.tracers`, `ConsoleCallbackHandler`, `langchain_core.tracers`, `ConsoleCallbackHandler`],
[`langchain_community.callbacks.tracers`, `FunctionCallbackHandler`, `langchain_core.tracers.stdout`, `FunctionCallbackHandler`],
[`langchain_community.callbacks.tracers`, `LangChainTracer`, `langchain_core.tracers`, `LangChainTracer`],
[`langchain_community.callbacks.tracers`, `LangChainTracerV1`, `langchain_core.tracers.langchain_v1`, `LangChainTracerV1`],
[`langchain_community.docstore.document`, `Document`, `langchain_core.documents`, `Document`],
[`langchain_community.document_loaders`, `Blob`, `langchain_core.document_loaders`, `Blob`],
[`langchain_community.document_loaders`, `BlobLoader`, `langchain_core.document_loaders`, `BlobLoader`],
[`langchain_community.document_loaders.base`, `BaseBlobParser`, `langchain_core.document_loaders`, `BaseBlobParser`],
[`langchain_community.document_loaders.base`, `BaseLoader`, `langchain_core.document_loaders`, `BaseLoader`],
[`langchain_community.document_loaders.blob_loaders`, `Blob`, `langchain_core.document_loaders`, `Blob`],
[`langchain_community.document_loaders.blob_loaders`, `BlobLoader`, `langchain_core.document_loaders`, `BlobLoader`],
[`langchain_community.document_loaders.blob_loaders.schema`, `Blob`, `langchain_core.document_loaders`, `Blob`],
[`langchain_community.document_loaders.blob_loaders.schema`, `BlobLoader`, `langchain_core.document_loaders`, `BlobLoader`],
[`langchain_community.tools`, `BaseTool`, `langchain_core.tools`, `BaseTool`],
[`langchain_community.tools`, `StructuredTool`, `langchain_core.tools`, `StructuredTool`],
[`langchain_community.tools`, `Tool`, `langchain_core.tools`, `Tool`],
[`langchain_community.tools`, `format_tool_to_openai_function`, `langchain_core.utils.function_calling`, `format_tool_to_openai_function`],
[`langchain_community.tools`, `tool`, `langchain_core.tools`, `tool`],
[`langchain_community.tools.convert_to_openai`, `format_tool_to_openai_function`, `langchain_core.utils.function_calling`, `format_tool_to_openai_function`],
[`langchain_community.tools.convert_to_openai`, `format_tool_to_openai_tool`, `langchain_core.utils.function_calling`, `format_tool_to_openai_tool`],
[`langchain_community.tools.render`, `format_tool_to_openai_function`, `langchain_core.utils.function_calling`, `format_tool_to_openai_function`],
[`langchain_community.tools.render`, `format_tool_to_openai_tool`, `langchain_core.utils.function_calling`, `format_tool_to_openai_tool`],
[`langchain_community.utils.openai_functions`, `FunctionDescription`, `langchain_core.utils.function_calling`, `FunctionDescription`],
[`langchain_community.utils.openai_functions`, `ToolDescription`, `langchain_core.utils.function_calling`, `ToolDescription`],
[`langchain_community.utils.openai_functions`, `convert_pydantic_to_openai_function`, `langchain_core.utils.function_calling`, `convert_pydantic_to_openai_function`],
[`langchain_community.utils.openai_functions`, `convert_pydantic_to_openai_tool`, `langchain_core.utils.function_calling`, `convert_pydantic_to_openai_tool`],
[`langchain_community.vectorstores`, `VectorStore`, `langchain_core.vectorstores`, `VectorStore`]
])
}
// Add this for invoking directly
langchain_migrate_community_to_core()

View File

@@ -0,0 +1,101 @@
[
[
"langchain_community.callbacks.tracers.ConsoleCallbackHandler",
"langchain_core.tracers.ConsoleCallbackHandler"
],
[
"langchain_community.callbacks.tracers.FunctionCallbackHandler",
"langchain_core.tracers.stdout.FunctionCallbackHandler"
],
[
"langchain_community.callbacks.tracers.LangChainTracer",
"langchain_core.tracers.LangChainTracer"
],
[
"langchain_community.callbacks.tracers.LangChainTracerV1",
"langchain_core.tracers.langchain_v1.LangChainTracerV1"
],
[
"langchain_community.docstore.document.Document",
"langchain_core.documents.Document"
],
[
"langchain_community.document_loaders.Blob",
"langchain_core.document_loaders.Blob"
],
[
"langchain_community.document_loaders.BlobLoader",
"langchain_core.document_loaders.BlobLoader"
],
[
"langchain_community.document_loaders.base.BaseBlobParser",
"langchain_core.document_loaders.BaseBlobParser"
],
[
"langchain_community.document_loaders.base.BaseLoader",
"langchain_core.document_loaders.BaseLoader"
],
[
"langchain_community.document_loaders.blob_loaders.Blob",
"langchain_core.document_loaders.Blob"
],
[
"langchain_community.document_loaders.blob_loaders.BlobLoader",
"langchain_core.document_loaders.BlobLoader"
],
[
"langchain_community.document_loaders.blob_loaders.schema.Blob",
"langchain_core.document_loaders.Blob"
],
[
"langchain_community.document_loaders.blob_loaders.schema.BlobLoader",
"langchain_core.document_loaders.BlobLoader"
],
["langchain_community.tools.BaseTool", "langchain_core.tools.BaseTool"],
[
"langchain_community.tools.StructuredTool",
"langchain_core.tools.StructuredTool"
],
["langchain_community.tools.Tool", "langchain_core.tools.Tool"],
[
"langchain_community.tools.format_tool_to_openai_function",
"langchain_core.utils.function_calling.format_tool_to_openai_function"
],
["langchain_community.tools.tool", "langchain_core.tools.tool"],
[
"langchain_community.tools.convert_to_openai.format_tool_to_openai_function",
"langchain_core.utils.function_calling.format_tool_to_openai_function"
],
[
"langchain_community.tools.convert_to_openai.format_tool_to_openai_tool",
"langchain_core.utils.function_calling.format_tool_to_openai_tool"
],
[
"langchain_community.tools.render.format_tool_to_openai_function",
"langchain_core.utils.function_calling.format_tool_to_openai_function"
],
[
"langchain_community.tools.render.format_tool_to_openai_tool",
"langchain_core.utils.function_calling.format_tool_to_openai_tool"
],
[
"langchain_community.utils.openai_functions.FunctionDescription",
"langchain_core.utils.function_calling.FunctionDescription"
],
[
"langchain_community.utils.openai_functions.ToolDescription",
"langchain_core.utils.function_calling.ToolDescription"
],
[
"langchain_community.utils.openai_functions.convert_pydantic_to_openai_function",
"langchain_core.utils.function_calling.convert_pydantic_to_openai_function"
],
[
"langchain_community.utils.openai_functions.convert_pydantic_to_openai_tool",
"langchain_core.utils.function_calling.convert_pydantic_to_openai_tool"
],
[
"langchain_community.vectorstores.VectorStore",
"langchain_core.vectorstores.VectorStore"
]
]

View File

@@ -0,0 +1,18 @@
language python
pattern langchain_all_migrations() {
any {
langchain_migrate_community_to_core(),
langchain_migrate_fireworks(),
langchain_migrate_ibm(),
langchain_migrate_langchain_to_core(),
langchain_migrate_langchain_to_langchain_community(),
langchain_migrate_langchain_to_textsplitters(),
langchain_migrate_openai(),
langchain_migrate_pinecone(),
langchain_migrate_anthropic(),
replace_pydantic_v1_shim()
}
}
langchain_all_migrations()

View File

@@ -0,0 +1,15 @@
language python
// This migration is generated automatically - do not manually edit this file
pattern langchain_migrate_fireworks() {
find_replace_imports(list=[
[`langchain_community.chat_models.fireworks`, `ChatFireworks`, `langchain_fireworks`, `ChatFireworks`],
[`langchain_community.llms.fireworks`, `Fireworks`, `langchain_fireworks`, `Fireworks`],
[`langchain_community.chat_models`, `ChatFireworks`, `langchain_fireworks`, `ChatFireworks`],
[`langchain_community.llms`, `Fireworks`, `langchain_fireworks`, `Fireworks`]
])
}
// Add this for invoking directly
langchain_migrate_fireworks()

View File

@@ -0,0 +1,13 @@
language python
// This migration is generated automatically - do not manually edit this file
pattern langchain_migrate_ibm() {
find_replace_imports(list=[
[`langchain_community.llms.watsonxllm`, `WatsonxLLM`, `langchain_ibm`, `WatsonxLLM`],
[`langchain_community.llms`, `WatsonxLLM`, `langchain_ibm`, `WatsonxLLM`]
])
}
// Add this for invoking directly
langchain_migrate_ibm()

View File

@@ -0,0 +1,542 @@
language python
// This migration is generated automatically - do not manually edit this file
pattern langchain_migrate_langchain_to_core() {
find_replace_imports(list=[
[`langchain._api`, `deprecated`, `langchain_core._api`, `deprecated`],
[`langchain._api`, `LangChainDeprecationWarning`, `langchain_core._api`, `LangChainDeprecationWarning`],
[`langchain._api`, `suppress_langchain_deprecation_warning`, `langchain_core._api`, `suppress_langchain_deprecation_warning`],
[`langchain._api`, `surface_langchain_deprecation_warnings`, `langchain_core._api`, `surface_langchain_deprecation_warnings`],
[`langchain._api`, `warn_deprecated`, `langchain_core._api`, `warn_deprecated`],
[`langchain._api.deprecation`, `LangChainDeprecationWarning`, `langchain_core._api`, `LangChainDeprecationWarning`],
[`langchain._api.deprecation`, `LangChainPendingDeprecationWarning`, `langchain_core._api.deprecation`, `LangChainPendingDeprecationWarning`],
[`langchain._api.deprecation`, `deprecated`, `langchain_core._api`, `deprecated`],
[`langchain._api.deprecation`, `suppress_langchain_deprecation_warning`, `langchain_core._api`, `suppress_langchain_deprecation_warning`],
[`langchain._api.deprecation`, `warn_deprecated`, `langchain_core._api`, `warn_deprecated`],
[`langchain._api.deprecation`, `surface_langchain_deprecation_warnings`, `langchain_core._api`, `surface_langchain_deprecation_warnings`],
[`langchain._api.path`, `get_relative_path`, `langchain_core._api`, `get_relative_path`],
[`langchain._api.path`, `as_import_path`, `langchain_core._api`, `as_import_path`],
[`langchain.agents`, `Tool`, `langchain_core.tools`, `Tool`],
[`langchain.agents`, `tool`, `langchain_core.tools`, `tool`],
[`langchain.agents.tools`, `BaseTool`, `langchain_core.tools`, `BaseTool`],
[`langchain.agents.tools`, `tool`, `langchain_core.tools`, `tool`],
[`langchain.agents.tools`, `Tool`, `langchain_core.tools`, `Tool`],
[`langchain.base_language`, `BaseLanguageModel`, `langchain_core.language_models`, `BaseLanguageModel`],
[`langchain.callbacks`, `StdOutCallbackHandler`, `langchain_core.callbacks`, `StdOutCallbackHandler`],
[`langchain.callbacks`, `StreamingStdOutCallbackHandler`, `langchain_core.callbacks`, `StreamingStdOutCallbackHandler`],
[`langchain.callbacks`, `LangChainTracer`, `langchain_core.tracers`, `LangChainTracer`],
[`langchain.callbacks`, `tracing_enabled`, `langchain_core.tracers.context`, `tracing_enabled`],
[`langchain.callbacks`, `tracing_v2_enabled`, `langchain_core.tracers.context`, `tracing_v2_enabled`],
[`langchain.callbacks`, `collect_runs`, `langchain_core.tracers.context`, `collect_runs`],
[`langchain.callbacks.base`, `RetrieverManagerMixin`, `langchain_core.callbacks`, `RetrieverManagerMixin`],
[`langchain.callbacks.base`, `LLMManagerMixin`, `langchain_core.callbacks`, `LLMManagerMixin`],
[`langchain.callbacks.base`, `ChainManagerMixin`, `langchain_core.callbacks`, `ChainManagerMixin`],
[`langchain.callbacks.base`, `ToolManagerMixin`, `langchain_core.callbacks`, `ToolManagerMixin`],
[`langchain.callbacks.base`, `CallbackManagerMixin`, `langchain_core.callbacks`, `CallbackManagerMixin`],
[`langchain.callbacks.base`, `RunManagerMixin`, `langchain_core.callbacks`, `RunManagerMixin`],
[`langchain.callbacks.base`, `BaseCallbackHandler`, `langchain_core.callbacks`, `BaseCallbackHandler`],
[`langchain.callbacks.base`, `AsyncCallbackHandler`, `langchain_core.callbacks`, `AsyncCallbackHandler`],
[`langchain.callbacks.base`, `BaseCallbackManager`, `langchain_core.callbacks`, `BaseCallbackManager`],
[`langchain.callbacks.manager`, `BaseRunManager`, `langchain_core.callbacks`, `BaseRunManager`],
[`langchain.callbacks.manager`, `RunManager`, `langchain_core.callbacks`, `RunManager`],
[`langchain.callbacks.manager`, `ParentRunManager`, `langchain_core.callbacks`, `ParentRunManager`],
[`langchain.callbacks.manager`, `AsyncRunManager`, `langchain_core.callbacks`, `AsyncRunManager`],
[`langchain.callbacks.manager`, `AsyncParentRunManager`, `langchain_core.callbacks`, `AsyncParentRunManager`],
[`langchain.callbacks.manager`, `CallbackManagerForLLMRun`, `langchain_core.callbacks`, `CallbackManagerForLLMRun`],
[`langchain.callbacks.manager`, `AsyncCallbackManagerForLLMRun`, `langchain_core.callbacks`, `AsyncCallbackManagerForLLMRun`],
[`langchain.callbacks.manager`, `CallbackManagerForChainRun`, `langchain_core.callbacks`, `CallbackManagerForChainRun`],
[`langchain.callbacks.manager`, `AsyncCallbackManagerForChainRun`, `langchain_core.callbacks`, `AsyncCallbackManagerForChainRun`],
[`langchain.callbacks.manager`, `CallbackManagerForToolRun`, `langchain_core.callbacks`, `CallbackManagerForToolRun`],
[`langchain.callbacks.manager`, `AsyncCallbackManagerForToolRun`, `langchain_core.callbacks`, `AsyncCallbackManagerForToolRun`],
[`langchain.callbacks.manager`, `CallbackManagerForRetrieverRun`, `langchain_core.callbacks`, `CallbackManagerForRetrieverRun`],
[`langchain.callbacks.manager`, `AsyncCallbackManagerForRetrieverRun`, `langchain_core.callbacks`, `AsyncCallbackManagerForRetrieverRun`],
[`langchain.callbacks.manager`, `CallbackManager`, `langchain_core.callbacks`, `CallbackManager`],
[`langchain.callbacks.manager`, `CallbackManagerForChainGroup`, `langchain_core.callbacks`, `CallbackManagerForChainGroup`],
[`langchain.callbacks.manager`, `AsyncCallbackManager`, `langchain_core.callbacks`, `AsyncCallbackManager`],
[`langchain.callbacks.manager`, `AsyncCallbackManagerForChainGroup`, `langchain_core.callbacks`, `AsyncCallbackManagerForChainGroup`],
[`langchain.callbacks.manager`, `tracing_enabled`, `langchain_core.tracers.context`, `tracing_enabled`],
[`langchain.callbacks.manager`, `tracing_v2_enabled`, `langchain_core.tracers.context`, `tracing_v2_enabled`],
[`langchain.callbacks.manager`, `collect_runs`, `langchain_core.tracers.context`, `collect_runs`],
[`langchain.callbacks.manager`, `atrace_as_chain_group`, `langchain_core.callbacks.manager`, `atrace_as_chain_group`],
[`langchain.callbacks.manager`, `trace_as_chain_group`, `langchain_core.callbacks.manager`, `trace_as_chain_group`],
[`langchain.callbacks.manager`, `handle_event`, `langchain_core.callbacks.manager`, `handle_event`],
[`langchain.callbacks.manager`, `ahandle_event`, `langchain_core.callbacks.manager`, `ahandle_event`],
[`langchain.callbacks.manager`, `env_var_is_set`, `langchain_core.utils.env`, `env_var_is_set`],
[`langchain.callbacks.stdout`, `StdOutCallbackHandler`, `langchain_core.callbacks`, `StdOutCallbackHandler`],
[`langchain.callbacks.streaming_stdout`, `StreamingStdOutCallbackHandler`, `langchain_core.callbacks`, `StreamingStdOutCallbackHandler`],
[`langchain.callbacks.tracers`, `ConsoleCallbackHandler`, `langchain_core.tracers`, `ConsoleCallbackHandler`],
[`langchain.callbacks.tracers`, `FunctionCallbackHandler`, `langchain_core.tracers.stdout`, `FunctionCallbackHandler`],
[`langchain.callbacks.tracers`, `LangChainTracer`, `langchain_core.tracers`, `LangChainTracer`],
[`langchain.callbacks.tracers`, `LangChainTracerV1`, `langchain_core.tracers.langchain_v1`, `LangChainTracerV1`],
[`langchain.callbacks.tracers.base`, `BaseTracer`, `langchain_core.tracers`, `BaseTracer`],
[`langchain.callbacks.tracers.base`, `TracerException`, `langchain_core.exceptions`, `TracerException`],
[`langchain.callbacks.tracers.evaluation`, `wait_for_all_evaluators`, `langchain_core.tracers.evaluation`, `wait_for_all_evaluators`],
[`langchain.callbacks.tracers.evaluation`, `EvaluatorCallbackHandler`, `langchain_core.tracers`, `EvaluatorCallbackHandler`],
[`langchain.callbacks.tracers.langchain`, `LangChainTracer`, `langchain_core.tracers`, `LangChainTracer`],
[`langchain.callbacks.tracers.langchain`, `wait_for_all_tracers`, `langchain_core.tracers.langchain`, `wait_for_all_tracers`],
[`langchain.callbacks.tracers.langchain_v1`, `LangChainTracerV1`, `langchain_core.tracers.langchain_v1`, `LangChainTracerV1`],
[`langchain.callbacks.tracers.log_stream`, `LogEntry`, `langchain_core.tracers.log_stream`, `LogEntry`],
[`langchain.callbacks.tracers.log_stream`, `RunState`, `langchain_core.tracers.log_stream`, `RunState`],
[`langchain.callbacks.tracers.log_stream`, `RunLog`, `langchain_core.tracers`, `RunLog`],
[`langchain.callbacks.tracers.log_stream`, `RunLogPatch`, `langchain_core.tracers`, `RunLogPatch`],
[`langchain.callbacks.tracers.log_stream`, `LogStreamCallbackHandler`, `langchain_core.tracers`, `LogStreamCallbackHandler`],
[`langchain.callbacks.tracers.root_listeners`, `RootListenersTracer`, `langchain_core.tracers.root_listeners`, `RootListenersTracer`],
[`langchain.callbacks.tracers.run_collector`, `RunCollectorCallbackHandler`, `langchain_core.tracers.run_collector`, `RunCollectorCallbackHandler`],
[`langchain.callbacks.tracers.schemas`, `BaseRun`, `langchain_core.tracers.schemas`, `BaseRun`],
[`langchain.callbacks.tracers.schemas`, `ChainRun`, `langchain_core.tracers.schemas`, `ChainRun`],
[`langchain.callbacks.tracers.schemas`, `LLMRun`, `langchain_core.tracers.schemas`, `LLMRun`],
[`langchain.callbacks.tracers.schemas`, `Run`, `langchain_core.tracers`, `Run`],
[`langchain.callbacks.tracers.schemas`, `RunTypeEnum`, `langchain_core.tracers.schemas`, `RunTypeEnum`],
[`langchain.callbacks.tracers.schemas`, `ToolRun`, `langchain_core.tracers.schemas`, `ToolRun`],
[`langchain.callbacks.tracers.schemas`, `TracerSession`, `langchain_core.tracers.schemas`, `TracerSession`],
[`langchain.callbacks.tracers.schemas`, `TracerSessionBase`, `langchain_core.tracers.schemas`, `TracerSessionBase`],
[`langchain.callbacks.tracers.schemas`, `TracerSessionV1`, `langchain_core.tracers.schemas`, `TracerSessionV1`],
[`langchain.callbacks.tracers.schemas`, `TracerSessionV1Base`, `langchain_core.tracers.schemas`, `TracerSessionV1Base`],
[`langchain.callbacks.tracers.schemas`, `TracerSessionV1Create`, `langchain_core.tracers.schemas`, `TracerSessionV1Create`],
[`langchain.callbacks.tracers.stdout`, `FunctionCallbackHandler`, `langchain_core.tracers.stdout`, `FunctionCallbackHandler`],
[`langchain.callbacks.tracers.stdout`, `ConsoleCallbackHandler`, `langchain_core.tracers`, `ConsoleCallbackHandler`],
[`langchain.chains.openai_functions`, `convert_to_openai_function`, `langchain_core.utils.function_calling`, `convert_to_openai_function`],
[`langchain.chains.openai_functions.base`, `convert_to_openai_function`, `langchain_core.utils.function_calling`, `convert_to_openai_function`],
[`langchain.chat_models.base`, `BaseChatModel`, `langchain_core.language_models`, `BaseChatModel`],
[`langchain.chat_models.base`, `SimpleChatModel`, `langchain_core.language_models`, `SimpleChatModel`],
[`langchain.chat_models.base`, `generate_from_stream`, `langchain_core.language_models.chat_models`, `generate_from_stream`],
[`langchain.chat_models.base`, `agenerate_from_stream`, `langchain_core.language_models.chat_models`, `agenerate_from_stream`],
[`langchain.docstore.document`, `Document`, `langchain_core.documents`, `Document`],
[`langchain.document_loaders`, `Blob`, `langchain_core.document_loaders`, `Blob`],
[`langchain.document_loaders`, `BlobLoader`, `langchain_core.document_loaders`, `BlobLoader`],
[`langchain.document_loaders.base`, `BaseLoader`, `langchain_core.document_loaders`, `BaseLoader`],
[`langchain.document_loaders.base`, `BaseBlobParser`, `langchain_core.document_loaders`, `BaseBlobParser`],
[`langchain.document_loaders.blob_loaders`, `BlobLoader`, `langchain_core.document_loaders`, `BlobLoader`],
[`langchain.document_loaders.blob_loaders`, `Blob`, `langchain_core.document_loaders`, `Blob`],
[`langchain.document_loaders.blob_loaders.schema`, `Blob`, `langchain_core.document_loaders`, `Blob`],
[`langchain.document_loaders.blob_loaders.schema`, `BlobLoader`, `langchain_core.document_loaders`, `BlobLoader`],
[`langchain.embeddings.base`, `Embeddings`, `langchain_core.embeddings`, `Embeddings`],
[`langchain.formatting`, `StrictFormatter`, `langchain_core.utils`, `StrictFormatter`],
[`langchain.input`, `get_bolded_text`, `langchain_core.utils`, `get_bolded_text`],
[`langchain.input`, `get_color_mapping`, `langchain_core.utils`, `get_color_mapping`],
[`langchain.input`, `get_colored_text`, `langchain_core.utils`, `get_colored_text`],
[`langchain.input`, `print_text`, `langchain_core.utils`, `print_text`],
[`langchain.llms.base`, `BaseLanguageModel`, `langchain_core.language_models`, `BaseLanguageModel`],
[`langchain.llms.base`, `BaseLLM`, `langchain_core.language_models`, `BaseLLM`],
[`langchain.llms.base`, `LLM`, `langchain_core.language_models`, `LLM`],
[`langchain.load`, `dumpd`, `langchain_core.load`, `dumpd`],
[`langchain.load`, `dumps`, `langchain_core.load`, `dumps`],
[`langchain.load`, `load`, `langchain_core.load`, `load`],
[`langchain.load`, `loads`, `langchain_core.load`, `loads`],
[`langchain.load.dump`, `default`, `langchain_core.load.dump`, `default`],
[`langchain.load.dump`, `dumps`, `langchain_core.load`, `dumps`],
[`langchain.load.dump`, `dumpd`, `langchain_core.load`, `dumpd`],
[`langchain.load.load`, `Reviver`, `langchain_core.load.load`, `Reviver`],
[`langchain.load.load`, `loads`, `langchain_core.load`, `loads`],
[`langchain.load.load`, `load`, `langchain_core.load`, `load`],
[`langchain.load.serializable`, `BaseSerialized`, `langchain_core.load.serializable`, `BaseSerialized`],
[`langchain.load.serializable`, `SerializedConstructor`, `langchain_core.load.serializable`, `SerializedConstructor`],
[`langchain.load.serializable`, `SerializedSecret`, `langchain_core.load.serializable`, `SerializedSecret`],
[`langchain.load.serializable`, `SerializedNotImplemented`, `langchain_core.load.serializable`, `SerializedNotImplemented`],
[`langchain.load.serializable`, `try_neq_default`, `langchain_core.load.serializable`, `try_neq_default`],
[`langchain.load.serializable`, `Serializable`, `langchain_core.load`, `Serializable`],
[`langchain.load.serializable`, `to_json_not_implemented`, `langchain_core.load.serializable`, `to_json_not_implemented`],
[`langchain.output_parsers`, `CommaSeparatedListOutputParser`, `langchain_core.output_parsers`, `CommaSeparatedListOutputParser`],
[`langchain.output_parsers`, `ListOutputParser`, `langchain_core.output_parsers`, `ListOutputParser`],
[`langchain.output_parsers`, `MarkdownListOutputParser`, `langchain_core.output_parsers`, `MarkdownListOutputParser`],
[`langchain.output_parsers`, `NumberedListOutputParser`, `langchain_core.output_parsers`, `NumberedListOutputParser`],
[`langchain.output_parsers`, `PydanticOutputParser`, `langchain_core.output_parsers`, `PydanticOutputParser`],
[`langchain.output_parsers`, `XMLOutputParser`, `langchain_core.output_parsers`, `XMLOutputParser`],
[`langchain.output_parsers`, `JsonOutputToolsParser`, `langchain_core.output_parsers.openai_tools`, `JsonOutputToolsParser`],
[`langchain.output_parsers`, `PydanticToolsParser`, `langchain_core.output_parsers.openai_tools`, `PydanticToolsParser`],
[`langchain.output_parsers`, `JsonOutputKeyToolsParser`, `langchain_core.output_parsers.openai_tools`, `JsonOutputKeyToolsParser`],
[`langchain.output_parsers.json`, `SimpleJsonOutputParser`, `langchain_core.output_parsers`, `JsonOutputParser`],
[`langchain.output_parsers.json`, `parse_partial_json`, `langchain_core.utils.json`, `parse_partial_json`],
[`langchain.output_parsers.json`, `parse_json_markdown`, `langchain_core.utils.json`, `parse_json_markdown`],
[`langchain.output_parsers.json`, `parse_and_check_json_markdown`, `langchain_core.utils.json`, `parse_and_check_json_markdown`],
[`langchain.output_parsers.list`, `ListOutputParser`, `langchain_core.output_parsers`, `ListOutputParser`],
[`langchain.output_parsers.list`, `CommaSeparatedListOutputParser`, `langchain_core.output_parsers`, `CommaSeparatedListOutputParser`],
[`langchain.output_parsers.list`, `NumberedListOutputParser`, `langchain_core.output_parsers`, `NumberedListOutputParser`],
[`langchain.output_parsers.list`, `MarkdownListOutputParser`, `langchain_core.output_parsers`, `MarkdownListOutputParser`],
[`langchain.output_parsers.openai_functions`, `PydanticOutputFunctionsParser`, `langchain_core.output_parsers.openai_functions`, `PydanticOutputFunctionsParser`],
[`langchain.output_parsers.openai_functions`, `PydanticAttrOutputFunctionsParser`, `langchain_core.output_parsers.openai_functions`, `PydanticAttrOutputFunctionsParser`],
[`langchain.output_parsers.openai_functions`, `JsonOutputFunctionsParser`, `langchain_core.output_parsers.openai_functions`, `JsonOutputFunctionsParser`],
[`langchain.output_parsers.openai_functions`, `JsonKeyOutputFunctionsParser`, `langchain_core.output_parsers.openai_functions`, `JsonKeyOutputFunctionsParser`],
[`langchain.output_parsers.openai_tools`, `PydanticToolsParser`, `langchain_core.output_parsers.openai_tools`, `PydanticToolsParser`],
[`langchain.output_parsers.openai_tools`, `JsonOutputToolsParser`, `langchain_core.output_parsers.openai_tools`, `JsonOutputToolsParser`],
[`langchain.output_parsers.openai_tools`, `JsonOutputKeyToolsParser`, `langchain_core.output_parsers.openai_tools`, `JsonOutputKeyToolsParser`],
[`langchain.output_parsers.pydantic`, `PydanticOutputParser`, `langchain_core.output_parsers`, `PydanticOutputParser`],
[`langchain.output_parsers.xml`, `XMLOutputParser`, `langchain_core.output_parsers`, `XMLOutputParser`],
[`langchain.prompts`, `AIMessagePromptTemplate`, `langchain_core.prompts`, `AIMessagePromptTemplate`],
[`langchain.prompts`, `BaseChatPromptTemplate`, `langchain_core.prompts`, `BaseChatPromptTemplate`],
[`langchain.prompts`, `BasePromptTemplate`, `langchain_core.prompts`, `BasePromptTemplate`],
[`langchain.prompts`, `ChatMessagePromptTemplate`, `langchain_core.prompts`, `ChatMessagePromptTemplate`],
[`langchain.prompts`, `ChatPromptTemplate`, `langchain_core.prompts`, `ChatPromptTemplate`],
[`langchain.prompts`, `FewShotPromptTemplate`, `langchain_core.prompts`, `FewShotPromptTemplate`],
[`langchain.prompts`, `FewShotPromptWithTemplates`, `langchain_core.prompts`, `FewShotPromptWithTemplates`],
[`langchain.prompts`, `HumanMessagePromptTemplate`, `langchain_core.prompts`, `HumanMessagePromptTemplate`],
[`langchain.prompts`, `LengthBasedExampleSelector`, `langchain_core.example_selectors`, `LengthBasedExampleSelector`],
[`langchain.prompts`, `MaxMarginalRelevanceExampleSelector`, `langchain_core.example_selectors`, `MaxMarginalRelevanceExampleSelector`],
[`langchain.prompts`, `MessagesPlaceholder`, `langchain_core.prompts`, `MessagesPlaceholder`],
[`langchain.prompts`, `PipelinePromptTemplate`, `langchain_core.prompts`, `PipelinePromptTemplate`],
[`langchain.prompts`, `PromptTemplate`, `langchain_core.prompts`, `PromptTemplate`],
[`langchain.prompts`, `SemanticSimilarityExampleSelector`, `langchain_core.example_selectors`, `SemanticSimilarityExampleSelector`],
[`langchain.prompts`, `StringPromptTemplate`, `langchain_core.prompts`, `StringPromptTemplate`],
[`langchain.prompts`, `SystemMessagePromptTemplate`, `langchain_core.prompts`, `SystemMessagePromptTemplate`],
[`langchain.prompts`, `load_prompt`, `langchain_core.prompts`, `load_prompt`],
[`langchain.prompts`, `FewShotChatMessagePromptTemplate`, `langchain_core.prompts`, `FewShotChatMessagePromptTemplate`],
[`langchain.prompts`, `Prompt`, `langchain_core.prompts`, `PromptTemplate`],
[`langchain.prompts.base`, `jinja2_formatter`, `langchain_core.prompts`, `jinja2_formatter`],
[`langchain.prompts.base`, `validate_jinja2`, `langchain_core.prompts`, `validate_jinja2`],
[`langchain.prompts.base`, `check_valid_template`, `langchain_core.prompts`, `check_valid_template`],
[`langchain.prompts.base`, `get_template_variables`, `langchain_core.prompts`, `get_template_variables`],
[`langchain.prompts.base`, `StringPromptTemplate`, `langchain_core.prompts`, `StringPromptTemplate`],
[`langchain.prompts.base`, `BasePromptTemplate`, `langchain_core.prompts`, `BasePromptTemplate`],
[`langchain.prompts.base`, `StringPromptValue`, `langchain_core.prompt_values`, `StringPromptValue`],
[`langchain.prompts.base`, `_get_jinja2_variables_from_template`, `langchain_core.prompts.string`, `_get_jinja2_variables_from_template`],
[`langchain.prompts.chat`, `BaseMessagePromptTemplate`, `langchain_core.prompts.chat`, `BaseMessagePromptTemplate`],
[`langchain.prompts.chat`, `MessagesPlaceholder`, `langchain_core.prompts`, `MessagesPlaceholder`],
[`langchain.prompts.chat`, `BaseStringMessagePromptTemplate`, `langchain_core.prompts.chat`, `BaseStringMessagePromptTemplate`],
[`langchain.prompts.chat`, `ChatMessagePromptTemplate`, `langchain_core.prompts`, `ChatMessagePromptTemplate`],
[`langchain.prompts.chat`, `HumanMessagePromptTemplate`, `langchain_core.prompts`, `HumanMessagePromptTemplate`],
[`langchain.prompts.chat`, `AIMessagePromptTemplate`, `langchain_core.prompts`, `AIMessagePromptTemplate`],
[`langchain.prompts.chat`, `SystemMessagePromptTemplate`, `langchain_core.prompts`, `SystemMessagePromptTemplate`],
[`langchain.prompts.chat`, `BaseChatPromptTemplate`, `langchain_core.prompts`, `BaseChatPromptTemplate`],
[`langchain.prompts.chat`, `ChatPromptTemplate`, `langchain_core.prompts`, `ChatPromptTemplate`],
[`langchain.prompts.chat`, `ChatPromptValue`, `langchain_core.prompt_values`, `ChatPromptValue`],
[`langchain.prompts.chat`, `ChatPromptValueConcrete`, `langchain_core.prompt_values`, `ChatPromptValueConcrete`],
[`langchain.prompts.chat`, `_convert_to_message`, `langchain_core.prompts.chat`, `_convert_to_message`],
[`langchain.prompts.chat`, `_create_template_from_message_type`, `langchain_core.prompts.chat`, `_create_template_from_message_type`],
[`langchain.prompts.example_selector`, `LengthBasedExampleSelector`, `langchain_core.example_selectors`, `LengthBasedExampleSelector`],
[`langchain.prompts.example_selector`, `MaxMarginalRelevanceExampleSelector`, `langchain_core.example_selectors`, `MaxMarginalRelevanceExampleSelector`],
[`langchain.prompts.example_selector`, `SemanticSimilarityExampleSelector`, `langchain_core.example_selectors`, `SemanticSimilarityExampleSelector`],
[`langchain.prompts.example_selector.base`, `BaseExampleSelector`, `langchain_core.example_selectors`, `BaseExampleSelector`],
[`langchain.prompts.example_selector.length_based`, `LengthBasedExampleSelector`, `langchain_core.example_selectors`, `LengthBasedExampleSelector`],
[`langchain.prompts.example_selector.semantic_similarity`, `sorted_values`, `langchain_core.example_selectors`, `sorted_values`],
[`langchain.prompts.example_selector.semantic_similarity`, `SemanticSimilarityExampleSelector`, `langchain_core.example_selectors`, `SemanticSimilarityExampleSelector`],
[`langchain.prompts.example_selector.semantic_similarity`, `MaxMarginalRelevanceExampleSelector`, `langchain_core.example_selectors`, `MaxMarginalRelevanceExampleSelector`],
[`langchain.prompts.few_shot`, `FewShotPromptTemplate`, `langchain_core.prompts`, `FewShotPromptTemplate`],
[`langchain.prompts.few_shot`, `FewShotChatMessagePromptTemplate`, `langchain_core.prompts`, `FewShotChatMessagePromptTemplate`],
[`langchain.prompts.few_shot`, `_FewShotPromptTemplateMixin`, `langchain_core.prompts.few_shot`, `_FewShotPromptTemplateMixin`],
[`langchain.prompts.few_shot_with_templates`, `FewShotPromptWithTemplates`, `langchain_core.prompts`, `FewShotPromptWithTemplates`],
[`langchain.prompts.loading`, `load_prompt_from_config`, `langchain_core.prompts.loading`, `load_prompt_from_config`],
[`langchain.prompts.loading`, `load_prompt`, `langchain_core.prompts`, `load_prompt`],
[`langchain.prompts.loading`, `try_load_from_hub`, `langchain_core.utils`, `try_load_from_hub`],
[`langchain.prompts.loading`, `_load_examples`, `langchain_core.prompts.loading`, `_load_examples`],
[`langchain.prompts.loading`, `_load_few_shot_prompt`, `langchain_core.prompts.loading`, `_load_few_shot_prompt`],
[`langchain.prompts.loading`, `_load_output_parser`, `langchain_core.prompts.loading`, `_load_output_parser`],
[`langchain.prompts.loading`, `_load_prompt`, `langchain_core.prompts.loading`, `_load_prompt`],
[`langchain.prompts.loading`, `_load_prompt_from_file`, `langchain_core.prompts.loading`, `_load_prompt_from_file`],
[`langchain.prompts.loading`, `_load_template`, `langchain_core.prompts.loading`, `_load_template`],
[`langchain.prompts.pipeline`, `PipelinePromptTemplate`, `langchain_core.prompts`, `PipelinePromptTemplate`],
[`langchain.prompts.pipeline`, `_get_inputs`, `langchain_core.prompts.pipeline`, `_get_inputs`],
[`langchain.prompts.prompt`, `PromptTemplate`, `langchain_core.prompts`, `PromptTemplate`],
[`langchain.prompts.prompt`, `Prompt`, `langchain_core.prompts`, `PromptTemplate`],
[`langchain.schema`, `BaseCache`, `langchain_core.caches`, `BaseCache`],
[`langchain.schema`, `BaseMemory`, `langchain_core.memory`, `BaseMemory`],
[`langchain.schema`, `BaseStore`, `langchain_core.stores`, `BaseStore`],
[`langchain.schema`, `AgentFinish`, `langchain_core.agents`, `AgentFinish`],
[`langchain.schema`, `AgentAction`, `langchain_core.agents`, `AgentAction`],
[`langchain.schema`, `Document`, `langchain_core.documents`, `Document`],
[`langchain.schema`, `BaseChatMessageHistory`, `langchain_core.chat_history`, `BaseChatMessageHistory`],
[`langchain.schema`, `BaseDocumentTransformer`, `langchain_core.documents`, `BaseDocumentTransformer`],
[`langchain.schema`, `BaseMessage`, `langchain_core.messages`, `BaseMessage`],
[`langchain.schema`, `ChatMessage`, `langchain_core.messages`, `ChatMessage`],
[`langchain.schema`, `FunctionMessage`, `langchain_core.messages`, `FunctionMessage`],
[`langchain.schema`, `HumanMessage`, `langchain_core.messages`, `HumanMessage`],
[`langchain.schema`, `AIMessage`, `langchain_core.messages`, `AIMessage`],
[`langchain.schema`, `SystemMessage`, `langchain_core.messages`, `SystemMessage`],
[`langchain.schema`, `messages_from_dict`, `langchain_core.messages`, `messages_from_dict`],
[`langchain.schema`, `messages_to_dict`, `langchain_core.messages`, `messages_to_dict`],
[`langchain.schema`, `message_to_dict`, `langchain_core.messages`, `message_to_dict`],
[`langchain.schema`, `_message_to_dict`, `langchain_core.messages`, `message_to_dict`],
[`langchain.schema`, `_message_from_dict`, `langchain_core.messages`, `_message_from_dict`],
[`langchain.schema`, `get_buffer_string`, `langchain_core.messages`, `get_buffer_string`],
[`langchain.schema`, `RunInfo`, `langchain_core.outputs`, `RunInfo`],
[`langchain.schema`, `LLMResult`, `langchain_core.outputs`, `LLMResult`],
[`langchain.schema`, `ChatResult`, `langchain_core.outputs`, `ChatResult`],
[`langchain.schema`, `ChatGeneration`, `langchain_core.outputs`, `ChatGeneration`],
[`langchain.schema`, `Generation`, `langchain_core.outputs`, `Generation`],
[`langchain.schema`, `PromptValue`, `langchain_core.prompt_values`, `PromptValue`],
[`langchain.schema`, `LangChainException`, `langchain_core.exceptions`, `LangChainException`],
[`langchain.schema`, `BaseRetriever`, `langchain_core.retrievers`, `BaseRetriever`],
[`langchain.schema`, `Memory`, `langchain_core.memory`, `BaseMemory`],
[`langchain.schema`, `OutputParserException`, `langchain_core.exceptions`, `OutputParserException`],
[`langchain.schema`, `StrOutputParser`, `langchain_core.output_parsers`, `StrOutputParser`],
[`langchain.schema`, `BaseOutputParser`, `langchain_core.output_parsers`, `BaseOutputParser`],
[`langchain.schema`, `BaseLLMOutputParser`, `langchain_core.output_parsers`, `BaseLLMOutputParser`],
[`langchain.schema`, `BasePromptTemplate`, `langchain_core.prompts`, `BasePromptTemplate`],
[`langchain.schema`, `format_document`, `langchain_core.prompts`, `format_document`],
[`langchain.schema.agent`, `AgentAction`, `langchain_core.agents`, `AgentAction`],
[`langchain.schema.agent`, `AgentActionMessageLog`, `langchain_core.agents`, `AgentActionMessageLog`],
[`langchain.schema.agent`, `AgentFinish`, `langchain_core.agents`, `AgentFinish`],
[`langchain.schema.cache`, `BaseCache`, `langchain_core.caches`, `BaseCache`],
[`langchain.schema.callbacks.base`, `RetrieverManagerMixin`, `langchain_core.callbacks`, `RetrieverManagerMixin`],
[`langchain.schema.callbacks.base`, `LLMManagerMixin`, `langchain_core.callbacks`, `LLMManagerMixin`],
[`langchain.schema.callbacks.base`, `ChainManagerMixin`, `langchain_core.callbacks`, `ChainManagerMixin`],
[`langchain.schema.callbacks.base`, `ToolManagerMixin`, `langchain_core.callbacks`, `ToolManagerMixin`],
[`langchain.schema.callbacks.base`, `CallbackManagerMixin`, `langchain_core.callbacks`, `CallbackManagerMixin`],
[`langchain.schema.callbacks.base`, `RunManagerMixin`, `langchain_core.callbacks`, `RunManagerMixin`],
[`langchain.schema.callbacks.base`, `BaseCallbackHandler`, `langchain_core.callbacks`, `BaseCallbackHandler`],
[`langchain.schema.callbacks.base`, `AsyncCallbackHandler`, `langchain_core.callbacks`, `AsyncCallbackHandler`],
[`langchain.schema.callbacks.base`, `BaseCallbackManager`, `langchain_core.callbacks`, `BaseCallbackManager`],
[`langchain.schema.callbacks.manager`, `tracing_enabled`, `langchain_core.tracers.context`, `tracing_enabled`],
[`langchain.schema.callbacks.manager`, `tracing_v2_enabled`, `langchain_core.tracers.context`, `tracing_v2_enabled`],
[`langchain.schema.callbacks.manager`, `collect_runs`, `langchain_core.tracers.context`, `collect_runs`],
[`langchain.schema.callbacks.manager`, `trace_as_chain_group`, `langchain_core.callbacks.manager`, `trace_as_chain_group`],
[`langchain.schema.callbacks.manager`, `handle_event`, `langchain_core.callbacks.manager`, `handle_event`],
[`langchain.schema.callbacks.manager`, `BaseRunManager`, `langchain_core.callbacks`, `BaseRunManager`],
[`langchain.schema.callbacks.manager`, `RunManager`, `langchain_core.callbacks`, `RunManager`],
[`langchain.schema.callbacks.manager`, `ParentRunManager`, `langchain_core.callbacks`, `ParentRunManager`],
[`langchain.schema.callbacks.manager`, `AsyncRunManager`, `langchain_core.callbacks`, `AsyncRunManager`],
[`langchain.schema.callbacks.manager`, `AsyncParentRunManager`, `langchain_core.callbacks`, `AsyncParentRunManager`],
[`langchain.schema.callbacks.manager`, `CallbackManagerForLLMRun`, `langchain_core.callbacks`, `CallbackManagerForLLMRun`],
[`langchain.schema.callbacks.manager`, `AsyncCallbackManagerForLLMRun`, `langchain_core.callbacks`, `AsyncCallbackManagerForLLMRun`],
[`langchain.schema.callbacks.manager`, `CallbackManagerForChainRun`, `langchain_core.callbacks`, `CallbackManagerForChainRun`],
[`langchain.schema.callbacks.manager`, `AsyncCallbackManagerForChainRun`, `langchain_core.callbacks`, `AsyncCallbackManagerForChainRun`],
[`langchain.schema.callbacks.manager`, `CallbackManagerForToolRun`, `langchain_core.callbacks`, `CallbackManagerForToolRun`],
[`langchain.schema.callbacks.manager`, `AsyncCallbackManagerForToolRun`, `langchain_core.callbacks`, `AsyncCallbackManagerForToolRun`],
[`langchain.schema.callbacks.manager`, `CallbackManagerForRetrieverRun`, `langchain_core.callbacks`, `CallbackManagerForRetrieverRun`],
[`langchain.schema.callbacks.manager`, `AsyncCallbackManagerForRetrieverRun`, `langchain_core.callbacks`, `AsyncCallbackManagerForRetrieverRun`],
[`langchain.schema.callbacks.manager`, `CallbackManager`, `langchain_core.callbacks`, `CallbackManager`],
[`langchain.schema.callbacks.manager`, `CallbackManagerForChainGroup`, `langchain_core.callbacks`, `CallbackManagerForChainGroup`],
[`langchain.schema.callbacks.manager`, `AsyncCallbackManager`, `langchain_core.callbacks`, `AsyncCallbackManager`],
[`langchain.schema.callbacks.manager`, `AsyncCallbackManagerForChainGroup`, `langchain_core.callbacks`, `AsyncCallbackManagerForChainGroup`],
[`langchain.schema.callbacks.manager`, `register_configure_hook`, `langchain_core.tracers.context`, `register_configure_hook`],
[`langchain.schema.callbacks.manager`, `env_var_is_set`, `langchain_core.utils.env`, `env_var_is_set`],
[`langchain.schema.callbacks.stdout`, `StdOutCallbackHandler`, `langchain_core.callbacks`, `StdOutCallbackHandler`],
[`langchain.schema.callbacks.streaming_stdout`, `StreamingStdOutCallbackHandler`, `langchain_core.callbacks`, `StreamingStdOutCallbackHandler`],
[`langchain.schema.callbacks.tracers.base`, `TracerException`, `langchain_core.exceptions`, `TracerException`],
[`langchain.schema.callbacks.tracers.base`, `BaseTracer`, `langchain_core.tracers`, `BaseTracer`],
[`langchain.schema.callbacks.tracers.evaluation`, `wait_for_all_evaluators`, `langchain_core.tracers.evaluation`, `wait_for_all_evaluators`],
[`langchain.schema.callbacks.tracers.evaluation`, `EvaluatorCallbackHandler`, `langchain_core.tracers`, `EvaluatorCallbackHandler`],
[`langchain.schema.callbacks.tracers.langchain`, `log_error_once`, `langchain_core.tracers.langchain`, `log_error_once`],
[`langchain.schema.callbacks.tracers.langchain`, `wait_for_all_tracers`, `langchain_core.tracers.langchain`, `wait_for_all_tracers`],
[`langchain.schema.callbacks.tracers.langchain`, `get_client`, `langchain_core.tracers.langchain`, `get_client`],
[`langchain.schema.callbacks.tracers.langchain`, `LangChainTracer`, `langchain_core.tracers`, `LangChainTracer`],
[`langchain.schema.callbacks.tracers.langchain_v1`, `get_headers`, `langchain_core.tracers.langchain_v1`, `get_headers`],
[`langchain.schema.callbacks.tracers.langchain_v1`, `LangChainTracerV1`, `langchain_core.tracers.langchain_v1`, `LangChainTracerV1`],
[`langchain.schema.callbacks.tracers.log_stream`, `LogEntry`, `langchain_core.tracers.log_stream`, `LogEntry`],
[`langchain.schema.callbacks.tracers.log_stream`, `RunState`, `langchain_core.tracers.log_stream`, `RunState`],
[`langchain.schema.callbacks.tracers.log_stream`, `RunLogPatch`, `langchain_core.tracers`, `RunLogPatch`],
[`langchain.schema.callbacks.tracers.log_stream`, `RunLog`, `langchain_core.tracers`, `RunLog`],
[`langchain.schema.callbacks.tracers.log_stream`, `LogStreamCallbackHandler`, `langchain_core.tracers`, `LogStreamCallbackHandler`],
[`langchain.schema.callbacks.tracers.root_listeners`, `RootListenersTracer`, `langchain_core.tracers.root_listeners`, `RootListenersTracer`],
[`langchain.schema.callbacks.tracers.run_collector`, `RunCollectorCallbackHandler`, `langchain_core.tracers.run_collector`, `RunCollectorCallbackHandler`],
[`langchain.schema.callbacks.tracers.schemas`, `RunTypeEnum`, `langchain_core.tracers.schemas`, `RunTypeEnum`],
[`langchain.schema.callbacks.tracers.schemas`, `TracerSessionV1Base`, `langchain_core.tracers.schemas`, `TracerSessionV1Base`],
[`langchain.schema.callbacks.tracers.schemas`, `TracerSessionV1Create`, `langchain_core.tracers.schemas`, `TracerSessionV1Create`],
[`langchain.schema.callbacks.tracers.schemas`, `TracerSessionV1`, `langchain_core.tracers.schemas`, `TracerSessionV1`],
[`langchain.schema.callbacks.tracers.schemas`, `TracerSessionBase`, `langchain_core.tracers.schemas`, `TracerSessionBase`],
[`langchain.schema.callbacks.tracers.schemas`, `TracerSession`, `langchain_core.tracers.schemas`, `TracerSession`],
[`langchain.schema.callbacks.tracers.schemas`, `BaseRun`, `langchain_core.tracers.schemas`, `BaseRun`],
[`langchain.schema.callbacks.tracers.schemas`, `LLMRun`, `langchain_core.tracers.schemas`, `LLMRun`],
[`langchain.schema.callbacks.tracers.schemas`, `ChainRun`, `langchain_core.tracers.schemas`, `ChainRun`],
[`langchain.schema.callbacks.tracers.schemas`, `ToolRun`, `langchain_core.tracers.schemas`, `ToolRun`],
[`langchain.schema.callbacks.tracers.schemas`, `Run`, `langchain_core.tracers`, `Run`],
[`langchain.schema.callbacks.tracers.stdout`, `try_json_stringify`, `langchain_core.tracers.stdout`, `try_json_stringify`],
[`langchain.schema.callbacks.tracers.stdout`, `elapsed`, `langchain_core.tracers.stdout`, `elapsed`],
[`langchain.schema.callbacks.tracers.stdout`, `FunctionCallbackHandler`, `langchain_core.tracers.stdout`, `FunctionCallbackHandler`],
[`langchain.schema.callbacks.tracers.stdout`, `ConsoleCallbackHandler`, `langchain_core.tracers`, `ConsoleCallbackHandler`],
[`langchain.schema.chat`, `ChatSession`, `langchain_core.chat_sessions`, `ChatSession`],
[`langchain.schema.chat_history`, `BaseChatMessageHistory`, `langchain_core.chat_history`, `BaseChatMessageHistory`],
[`langchain.schema.document`, `Document`, `langchain_core.documents`, `Document`],
[`langchain.schema.document`, `BaseDocumentTransformer`, `langchain_core.documents`, `BaseDocumentTransformer`],
[`langchain.schema.embeddings`, `Embeddings`, `langchain_core.embeddings`, `Embeddings`],
[`langchain.schema.exceptions`, `LangChainException`, `langchain_core.exceptions`, `LangChainException`],
[`langchain.schema.language_model`, `BaseLanguageModel`, `langchain_core.language_models`, `BaseLanguageModel`],
[`langchain.schema.language_model`, `_get_token_ids_default_method`, `langchain_core.language_models.base`, `_get_token_ids_default_method`],
[`langchain.schema.memory`, `BaseMemory`, `langchain_core.memory`, `BaseMemory`],
[`langchain.schema.messages`, `get_buffer_string`, `langchain_core.messages`, `get_buffer_string`],
[`langchain.schema.messages`, `BaseMessage`, `langchain_core.messages`, `BaseMessage`],
[`langchain.schema.messages`, `merge_content`, `langchain_core.messages`, `merge_content`],
[`langchain.schema.messages`, `BaseMessageChunk`, `langchain_core.messages`, `BaseMessageChunk`],
[`langchain.schema.messages`, `HumanMessage`, `langchain_core.messages`, `HumanMessage`],
[`langchain.schema.messages`, `HumanMessageChunk`, `langchain_core.messages`, `HumanMessageChunk`],
[`langchain.schema.messages`, `AIMessage`, `langchain_core.messages`, `AIMessage`],
[`langchain.schema.messages`, `AIMessageChunk`, `langchain_core.messages`, `AIMessageChunk`],
[`langchain.schema.messages`, `SystemMessage`, `langchain_core.messages`, `SystemMessage`],
[`langchain.schema.messages`, `SystemMessageChunk`, `langchain_core.messages`, `SystemMessageChunk`],
[`langchain.schema.messages`, `FunctionMessage`, `langchain_core.messages`, `FunctionMessage`],
[`langchain.schema.messages`, `FunctionMessageChunk`, `langchain_core.messages`, `FunctionMessageChunk`],
[`langchain.schema.messages`, `ToolMessage`, `langchain_core.messages`, `ToolMessage`],
[`langchain.schema.messages`, `ToolMessageChunk`, `langchain_core.messages`, `ToolMessageChunk`],
[`langchain.schema.messages`, `ChatMessage`, `langchain_core.messages`, `ChatMessage`],
[`langchain.schema.messages`, `ChatMessageChunk`, `langchain_core.messages`, `ChatMessageChunk`],
[`langchain.schema.messages`, `messages_to_dict`, `langchain_core.messages`, `messages_to_dict`],
[`langchain.schema.messages`, `messages_from_dict`, `langchain_core.messages`, `messages_from_dict`],
[`langchain.schema.messages`, `_message_to_dict`, `langchain_core.messages`, `message_to_dict`],
[`langchain.schema.messages`, `_message_from_dict`, `langchain_core.messages`, `_message_from_dict`],
[`langchain.schema.messages`, `message_to_dict`, `langchain_core.messages`, `message_to_dict`],
[`langchain.schema.output`, `Generation`, `langchain_core.outputs`, `Generation`],
[`langchain.schema.output`, `GenerationChunk`, `langchain_core.outputs`, `GenerationChunk`],
[`langchain.schema.output`, `ChatGeneration`, `langchain_core.outputs`, `ChatGeneration`],
[`langchain.schema.output`, `ChatGenerationChunk`, `langchain_core.outputs`, `ChatGenerationChunk`],
[`langchain.schema.output`, `RunInfo`, `langchain_core.outputs`, `RunInfo`],
[`langchain.schema.output`, `ChatResult`, `langchain_core.outputs`, `ChatResult`],
[`langchain.schema.output`, `LLMResult`, `langchain_core.outputs`, `LLMResult`],
[`langchain.schema.output_parser`, `BaseLLMOutputParser`, `langchain_core.output_parsers`, `BaseLLMOutputParser`],
[`langchain.schema.output_parser`, `BaseGenerationOutputParser`, `langchain_core.output_parsers`, `BaseGenerationOutputParser`],
[`langchain.schema.output_parser`, `BaseOutputParser`, `langchain_core.output_parsers`, `BaseOutputParser`],
[`langchain.schema.output_parser`, `BaseTransformOutputParser`, `langchain_core.output_parsers`, `BaseTransformOutputParser`],
[`langchain.schema.output_parser`, `BaseCumulativeTransformOutputParser`, `langchain_core.output_parsers`, `BaseCumulativeTransformOutputParser`],
[`langchain.schema.output_parser`, `NoOpOutputParser`, `langchain_core.output_parsers`, `StrOutputParser`],
[`langchain.schema.output_parser`, `StrOutputParser`, `langchain_core.output_parsers`, `StrOutputParser`],
[`langchain.schema.output_parser`, `OutputParserException`, `langchain_core.exceptions`, `OutputParserException`],
[`langchain.schema.prompt`, `PromptValue`, `langchain_core.prompt_values`, `PromptValue`],
[`langchain.schema.prompt_template`, `BasePromptTemplate`, `langchain_core.prompts`, `BasePromptTemplate`],
[`langchain.schema.prompt_template`, `format_document`, `langchain_core.prompts`, `format_document`],
[`langchain.schema.retriever`, `BaseRetriever`, `langchain_core.retrievers`, `BaseRetriever`],
[`langchain.schema.runnable`, `ConfigurableField`, `langchain_core.runnables`, `ConfigurableField`],
[`langchain.schema.runnable`, `ConfigurableFieldSingleOption`, `langchain_core.runnables`, `ConfigurableFieldSingleOption`],
[`langchain.schema.runnable`, `ConfigurableFieldMultiOption`, `langchain_core.runnables`, `ConfigurableFieldMultiOption`],
[`langchain.schema.runnable`, `patch_config`, `langchain_core.runnables`, `patch_config`],
[`langchain.schema.runnable`, `RouterInput`, `langchain_core.runnables`, `RouterInput`],
[`langchain.schema.runnable`, `RouterRunnable`, `langchain_core.runnables`, `RouterRunnable`],
[`langchain.schema.runnable`, `Runnable`, `langchain_core.runnables`, `Runnable`],
[`langchain.schema.runnable`, `RunnableSerializable`, `langchain_core.runnables`, `RunnableSerializable`],
[`langchain.schema.runnable`, `RunnableBinding`, `langchain_core.runnables`, `RunnableBinding`],
[`langchain.schema.runnable`, `RunnableBranch`, `langchain_core.runnables`, `RunnableBranch`],
[`langchain.schema.runnable`, `RunnableConfig`, `langchain_core.runnables`, `RunnableConfig`],
[`langchain.schema.runnable`, `RunnableGenerator`, `langchain_core.runnables`, `RunnableGenerator`],
[`langchain.schema.runnable`, `RunnableLambda`, `langchain_core.runnables`, `RunnableLambda`],
[`langchain.schema.runnable`, `RunnableMap`, `langchain_core.runnables`, `RunnableMap`],
[`langchain.schema.runnable`, `RunnableParallel`, `langchain_core.runnables`, `RunnableParallel`],
[`langchain.schema.runnable`, `RunnablePassthrough`, `langchain_core.runnables`, `RunnablePassthrough`],
[`langchain.schema.runnable`, `RunnableSequence`, `langchain_core.runnables`, `RunnableSequence`],
[`langchain.schema.runnable`, `RunnableWithFallbacks`, `langchain_core.runnables`, `RunnableWithFallbacks`],
[`langchain.schema.runnable.base`, `Runnable`, `langchain_core.runnables`, `Runnable`],
[`langchain.schema.runnable.base`, `RunnableSerializable`, `langchain_core.runnables`, `RunnableSerializable`],
[`langchain.schema.runnable.base`, `RunnableSequence`, `langchain_core.runnables`, `RunnableSequence`],
[`langchain.schema.runnable.base`, `RunnableParallel`, `langchain_core.runnables`, `RunnableParallel`],
[`langchain.schema.runnable.base`, `RunnableGenerator`, `langchain_core.runnables`, `RunnableGenerator`],
[`langchain.schema.runnable.base`, `RunnableLambda`, `langchain_core.runnables`, `RunnableLambda`],
[`langchain.schema.runnable.base`, `RunnableEachBase`, `langchain_core.runnables.base`, `RunnableEachBase`],
[`langchain.schema.runnable.base`, `RunnableEach`, `langchain_core.runnables.base`, `RunnableEach`],
[`langchain.schema.runnable.base`, `RunnableBindingBase`, `langchain_core.runnables.base`, `RunnableBindingBase`],
[`langchain.schema.runnable.base`, `RunnableBinding`, `langchain_core.runnables`, `RunnableBinding`],
[`langchain.schema.runnable.base`, `RunnableMap`, `langchain_core.runnables`, `RunnableMap`],
[`langchain.schema.runnable.base`, `coerce_to_runnable`, `langchain_core.runnables.base`, `coerce_to_runnable`],
[`langchain.schema.runnable.branch`, `RunnableBranch`, `langchain_core.runnables`, `RunnableBranch`],
[`langchain.schema.runnable.config`, `EmptyDict`, `langchain_core.runnables.config`, `EmptyDict`],
[`langchain.schema.runnable.config`, `RunnableConfig`, `langchain_core.runnables`, `RunnableConfig`],
[`langchain.schema.runnable.config`, `ensure_config`, `langchain_core.runnables`, `ensure_config`],
[`langchain.schema.runnable.config`, `get_config_list`, `langchain_core.runnables`, `get_config_list`],
[`langchain.schema.runnable.config`, `patch_config`, `langchain_core.runnables`, `patch_config`],
[`langchain.schema.runnable.config`, `merge_configs`, `langchain_core.runnables.config`, `merge_configs`],
[`langchain.schema.runnable.config`, `acall_func_with_variable_args`, `langchain_core.runnables.config`, `acall_func_with_variable_args`],
[`langchain.schema.runnable.config`, `call_func_with_variable_args`, `langchain_core.runnables.config`, `call_func_with_variable_args`],
[`langchain.schema.runnable.config`, `get_callback_manager_for_config`, `langchain_core.runnables.config`, `get_callback_manager_for_config`],
[`langchain.schema.runnable.config`, `get_async_callback_manager_for_config`, `langchain_core.runnables.config`, `get_async_callback_manager_for_config`],
[`langchain.schema.runnable.config`, `get_executor_for_config`, `langchain_core.runnables.config`, `get_executor_for_config`],
[`langchain.schema.runnable.configurable`, `DynamicRunnable`, `langchain_core.runnables.configurable`, `DynamicRunnable`],
[`langchain.schema.runnable.configurable`, `RunnableConfigurableFields`, `langchain_core.runnables.configurable`, `RunnableConfigurableFields`],
[`langchain.schema.runnable.configurable`, `StrEnum`, `langchain_core.runnables.configurable`, `StrEnum`],
[`langchain.schema.runnable.configurable`, `RunnableConfigurableAlternatives`, `langchain_core.runnables.configurable`, `RunnableConfigurableAlternatives`],
[`langchain.schema.runnable.configurable`, `make_options_spec`, `langchain_core.runnables.configurable`, `make_options_spec`],
[`langchain.schema.runnable.fallbacks`, `RunnableWithFallbacks`, `langchain_core.runnables`, `RunnableWithFallbacks`],
[`langchain.schema.runnable.history`, `RunnableWithMessageHistory`, `langchain_core.runnables.history`, `RunnableWithMessageHistory`],
[`langchain.schema.runnable.passthrough`, `aidentity`, `langchain_core.runnables.passthrough`, `aidentity`],
[`langchain.schema.runnable.passthrough`, `identity`, `langchain_core.runnables.passthrough`, `identity`],
[`langchain.schema.runnable.passthrough`, `RunnablePassthrough`, `langchain_core.runnables`, `RunnablePassthrough`],
[`langchain.schema.runnable.passthrough`, `RunnableAssign`, `langchain_core.runnables`, `RunnableAssign`],
[`langchain.schema.runnable.retry`, `RunnableRetry`, `langchain_core.runnables.retry`, `RunnableRetry`],
[`langchain.schema.runnable.router`, `RouterInput`, `langchain_core.runnables`, `RouterInput`],
[`langchain.schema.runnable.router`, `RouterRunnable`, `langchain_core.runnables`, `RouterRunnable`],
[`langchain.schema.runnable.utils`, `accepts_run_manager`, `langchain_core.runnables.utils`, `accepts_run_manager`],
[`langchain.schema.runnable.utils`, `accepts_config`, `langchain_core.runnables.utils`, `accepts_config`],
[`langchain.schema.runnable.utils`, `IsLocalDict`, `langchain_core.runnables.utils`, `IsLocalDict`],
[`langchain.schema.runnable.utils`, `IsFunctionArgDict`, `langchain_core.runnables.utils`, `IsFunctionArgDict`],
[`langchain.schema.runnable.utils`, `GetLambdaSource`, `langchain_core.runnables.utils`, `GetLambdaSource`],
[`langchain.schema.runnable.utils`, `get_function_first_arg_dict_keys`, `langchain_core.runnables.utils`, `get_function_first_arg_dict_keys`],
[`langchain.schema.runnable.utils`, `get_lambda_source`, `langchain_core.runnables.utils`, `get_lambda_source`],
[`langchain.schema.runnable.utils`, `indent_lines_after_first`, `langchain_core.runnables.utils`, `indent_lines_after_first`],
[`langchain.schema.runnable.utils`, `AddableDict`, `langchain_core.runnables`, `AddableDict`],
[`langchain.schema.runnable.utils`, `SupportsAdd`, `langchain_core.runnables.utils`, `SupportsAdd`],
[`langchain.schema.runnable.utils`, `add`, `langchain_core.runnables`, `add`],
[`langchain.schema.runnable.utils`, `ConfigurableField`, `langchain_core.runnables`, `ConfigurableField`],
[`langchain.schema.runnable.utils`, `ConfigurableFieldSingleOption`, `langchain_core.runnables`, `ConfigurableFieldSingleOption`],
[`langchain.schema.runnable.utils`, `ConfigurableFieldMultiOption`, `langchain_core.runnables`, `ConfigurableFieldMultiOption`],
[`langchain.schema.runnable.utils`, `ConfigurableFieldSpec`, `langchain_core.runnables`, `ConfigurableFieldSpec`],
[`langchain.schema.runnable.utils`, `get_unique_config_specs`, `langchain_core.runnables.utils`, `get_unique_config_specs`],
[`langchain.schema.runnable.utils`, `aadd`, `langchain_core.runnables`, `aadd`],
[`langchain.schema.runnable.utils`, `gated_coro`, `langchain_core.runnables.utils`, `gated_coro`],
[`langchain.schema.runnable.utils`, `gather_with_concurrency`, `langchain_core.runnables.utils`, `gather_with_concurrency`],
[`langchain.schema.storage`, `BaseStore`, `langchain_core.stores`, `BaseStore`],
[`langchain.schema.vectorstore`, `VectorStore`, `langchain_core.vectorstores`, `VectorStore`],
[`langchain.schema.vectorstore`, `VectorStoreRetriever`, `langchain_core.vectorstores`, `VectorStoreRetriever`],
[`langchain.tools`, `BaseTool`, `langchain_core.tools`, `BaseTool`],
[`langchain.tools`, `StructuredTool`, `langchain_core.tools`, `StructuredTool`],
[`langchain.tools`, `Tool`, `langchain_core.tools`, `Tool`],
[`langchain.tools`, `format_tool_to_openai_function`, `langchain_core.utils.function_calling`, `format_tool_to_openai_function`],
[`langchain.tools`, `tool`, `langchain_core.tools`, `tool`],
[`langchain.tools.base`, `SchemaAnnotationError`, `langchain_core.tools`, `SchemaAnnotationError`],
[`langchain.tools.base`, `create_schema_from_function`, `langchain_core.tools`, `create_schema_from_function`],
[`langchain.tools.base`, `ToolException`, `langchain_core.tools`, `ToolException`],
[`langchain.tools.base`, `BaseTool`, `langchain_core.tools`, `BaseTool`],
[`langchain.tools.base`, `Tool`, `langchain_core.tools`, `Tool`],
[`langchain.tools.base`, `StructuredTool`, `langchain_core.tools`, `StructuredTool`],
[`langchain.tools.base`, `tool`, `langchain_core.tools`, `tool`],
[`langchain.tools.convert_to_openai`, `format_tool_to_openai_function`, `langchain_core.utils.function_calling`, `format_tool_to_openai_function`],
[`langchain.tools.render`, `format_tool_to_openai_tool`, `langchain_core.utils.function_calling`, `format_tool_to_openai_tool`],
[`langchain.tools.render`, `format_tool_to_openai_function`, `langchain_core.utils.function_calling`, `format_tool_to_openai_function`],
[`langchain.utilities.loading`, `try_load_from_hub`, `langchain_core.utils`, `try_load_from_hub`],
[`langchain.utils`, `StrictFormatter`, `langchain_core.utils`, `StrictFormatter`],
[`langchain.utils`, `check_package_version`, `langchain_core.utils`, `check_package_version`],
[`langchain.utils`, `comma_list`, `langchain_core.utils`, `comma_list`],
[`langchain.utils`, `convert_to_secret_str`, `langchain_core.utils`, `convert_to_secret_str`],
[`langchain.utils`, `get_bolded_text`, `langchain_core.utils`, `get_bolded_text`],
[`langchain.utils`, `get_color_mapping`, `langchain_core.utils`, `get_color_mapping`],
[`langchain.utils`, `get_colored_text`, `langchain_core.utils`, `get_colored_text`],
[`langchain.utils`, `get_from_dict_or_env`, `langchain_core.utils`, `get_from_dict_or_env`],
[`langchain.utils`, `get_from_env`, `langchain_core.utils`, `get_from_env`],
[`langchain.utils`, `get_pydantic_field_names`, `langchain_core.utils`, `get_pydantic_field_names`],
[`langchain.utils`, `guard_import`, `langchain_core.utils`, `guard_import`],
[`langchain.utils`, `mock_now`, `langchain_core.utils`, `mock_now`],
[`langchain.utils`, `print_text`, `langchain_core.utils`, `print_text`],
[`langchain.utils`, `raise_for_status_with_text`, `langchain_core.utils`, `raise_for_status_with_text`],
[`langchain.utils`, `stringify_dict`, `langchain_core.utils`, `stringify_dict`],
[`langchain.utils`, `stringify_value`, `langchain_core.utils`, `stringify_value`],
[`langchain.utils`, `xor_args`, `langchain_core.utils`, `xor_args`],
[`langchain.utils.aiter`, `py_anext`, `langchain_core.utils.aiter`, `py_anext`],
[`langchain.utils.aiter`, `NoLock`, `langchain_core.utils.aiter`, `NoLock`],
[`langchain.utils.aiter`, `Tee`, `langchain_core.utils.aiter`, `Tee`],
[`langchain.utils.env`, `get_from_dict_or_env`, `langchain_core.utils`, `get_from_dict_or_env`],
[`langchain.utils.env`, `get_from_env`, `langchain_core.utils`, `get_from_env`],
[`langchain.utils.formatting`, `StrictFormatter`, `langchain_core.utils`, `StrictFormatter`],
[`langchain.utils.html`, `find_all_links`, `langchain_core.utils.html`, `find_all_links`],
[`langchain.utils.html`, `extract_sub_links`, `langchain_core.utils.html`, `extract_sub_links`],
[`langchain.utils.input`, `get_color_mapping`, `langchain_core.utils`, `get_color_mapping`],
[`langchain.utils.input`, `get_colored_text`, `langchain_core.utils`, `get_colored_text`],
[`langchain.utils.input`, `get_bolded_text`, `langchain_core.utils`, `get_bolded_text`],
[`langchain.utils.input`, `print_text`, `langchain_core.utils`, `print_text`],
[`langchain.utils.iter`, `NoLock`, `langchain_core.utils.iter`, `NoLock`],
[`langchain.utils.iter`, `tee_peer`, `langchain_core.utils.iter`, `tee_peer`],
[`langchain.utils.iter`, `Tee`, `langchain_core.utils.iter`, `Tee`],
[`langchain.utils.iter`, `batch_iterate`, `langchain_core.utils.iter`, `batch_iterate`],
[`langchain.utils.json_schema`, `_retrieve_ref`, `langchain_core.utils.json_schema`, `_retrieve_ref`],
[`langchain.utils.json_schema`, `_dereference_refs_helper`, `langchain_core.utils.json_schema`, `_dereference_refs_helper`],
[`langchain.utils.json_schema`, `_infer_skip_keys`, `langchain_core.utils.json_schema`, `_infer_skip_keys`],
[`langchain.utils.json_schema`, `dereference_refs`, `langchain_core.utils.json_schema`, `dereference_refs`],
[`langchain.utils.loading`, `try_load_from_hub`, `langchain_core.utils`, `try_load_from_hub`],
[`langchain.utils.openai_functions`, `FunctionDescription`, `langchain_core.utils.function_calling`, `FunctionDescription`],
[`langchain.utils.openai_functions`, `ToolDescription`, `langchain_core.utils.function_calling`, `ToolDescription`],
[`langchain.utils.openai_functions`, `convert_pydantic_to_openai_function`, `langchain_core.utils.function_calling`, `convert_pydantic_to_openai_function`],
[`langchain.utils.openai_functions`, `convert_pydantic_to_openai_tool`, `langchain_core.utils.function_calling`, `convert_pydantic_to_openai_tool`],
[`langchain.utils.pydantic`, `get_pydantic_major_version`, `langchain_core.utils.pydantic`, `get_pydantic_major_version`],
[`langchain.utils.strings`, `stringify_value`, `langchain_core.utils`, `stringify_value`],
[`langchain.utils.strings`, `stringify_dict`, `langchain_core.utils`, `stringify_dict`],
[`langchain.utils.strings`, `comma_list`, `langchain_core.utils`, `comma_list`],
[`langchain.utils.utils`, `xor_args`, `langchain_core.utils`, `xor_args`],
[`langchain.utils.utils`, `raise_for_status_with_text`, `langchain_core.utils`, `raise_for_status_with_text`],
[`langchain.utils.utils`, `mock_now`, `langchain_core.utils`, `mock_now`],
[`langchain.utils.utils`, `guard_import`, `langchain_core.utils`, `guard_import`],
[`langchain.utils.utils`, `check_package_version`, `langchain_core.utils`, `check_package_version`],
[`langchain.utils.utils`, `get_pydantic_field_names`, `langchain_core.utils`, `get_pydantic_field_names`],
[`langchain.utils.utils`, `build_extra_kwargs`, `langchain_core.utils`, `build_extra_kwargs`],
[`langchain.utils.utils`, `convert_to_secret_str`, `langchain_core.utils`, `convert_to_secret_str`],
[`langchain.vectorstores`, `VectorStore`, `langchain_core.vectorstores`, `VectorStore`],
[`langchain.vectorstores.base`, `VectorStore`, `langchain_core.vectorstores`, `VectorStore`],
[`langchain.vectorstores.base`, `VectorStoreRetriever`, `langchain_core.vectorstores`, `VectorStoreRetriever`],
[`langchain.vectorstores.singlestoredb`, `SingleStoreDBRetriever`, `langchain_core.vectorstores`, `VectorStoreRetriever`]
])
}
// Add this for invoking directly
langchain_migrate_langchain_to_core()
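
When this pattern runs (e.g., via `langchain-cli migrate`, which bundles these generated Grit patterns), each 4-tuple above rewrites one old `langchain` import to its `langchain_core` home. A hedged before/after sketch of the effect on user code, using two mappings from the list:

# Before the migration
from langchain.schema import Document
from langchain.schema.output_parser import StrOutputParser

# After the migration
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser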

File diff suppressed because it is too large

View File

@@ -0,0 +1,31 @@
language python
// This migration is generated automatically - do not manually edit this file
pattern langchain_migrate_langchain_to_textsplitters() {
find_replace_imports(list=[
[`langchain.text_splitter`, `TokenTextSplitter`, `langchain_text_splitters`, `TokenTextSplitter`],
[`langchain.text_splitter`, `TextSplitter`, `langchain_text_splitters`, `TextSplitter`],
[`langchain.text_splitter`, `Tokenizer`, `langchain_text_splitters`, `Tokenizer`],
[`langchain.text_splitter`, `Language`, `langchain_text_splitters`, `Language`],
[`langchain.text_splitter`, `RecursiveCharacterTextSplitter`, `langchain_text_splitters`, `RecursiveCharacterTextSplitter`],
[`langchain.text_splitter`, `RecursiveJsonSplitter`, `langchain_text_splitters`, `RecursiveJsonSplitter`],
[`langchain.text_splitter`, `LatexTextSplitter`, `langchain_text_splitters`, `LatexTextSplitter`],
[`langchain.text_splitter`, `PythonCodeTextSplitter`, `langchain_text_splitters`, `PythonCodeTextSplitter`],
[`langchain.text_splitter`, `KonlpyTextSplitter`, `langchain_text_splitters`, `KonlpyTextSplitter`],
[`langchain.text_splitter`, `SpacyTextSplitter`, `langchain_text_splitters`, `SpacyTextSplitter`],
[`langchain.text_splitter`, `NLTKTextSplitter`, `langchain_text_splitters`, `NLTKTextSplitter`],
[`langchain.text_splitter`, `split_text_on_tokens`, `langchain_text_splitters`, `split_text_on_tokens`],
[`langchain.text_splitter`, `SentenceTransformersTokenTextSplitter`, `langchain_text_splitters`, `SentenceTransformersTokenTextSplitter`],
[`langchain.text_splitter`, `ElementType`, `langchain_text_splitters`, `ElementType`],
[`langchain.text_splitter`, `HeaderType`, `langchain_text_splitters`, `HeaderType`],
[`langchain.text_splitter`, `LineType`, `langchain_text_splitters`, `LineType`],
[`langchain.text_splitter`, `HTMLHeaderTextSplitter`, `langchain_text_splitters`, `HTMLHeaderTextSplitter`],
[`langchain.text_splitter`, `MarkdownHeaderTextSplitter`, `langchain_text_splitters`, `MarkdownHeaderTextSplitter`],
[`langchain.text_splitter`, `MarkdownTextSplitter`, `langchain_text_splitters`, `MarkdownTextSplitter`],
[`langchain.text_splitter`, `CharacterTextSplitter`, `langchain_text_splitters`, `CharacterTextSplitter`]
])
}
// Add this for invoking directly
langchain_migrate_langchain_to_textsplitters()

View File

@@ -0,0 +1,70 @@
[
[
"langchain.text_splitter.TokenTextSplitter",
"langchain_text_splitters.TokenTextSplitter"
],
[
"langchain.text_splitter.TextSplitter",
"langchain_text_splitters.TextSplitter"
],
["langchain.text_splitter.Tokenizer", "langchain_text_splitters.Tokenizer"],
["langchain.text_splitter.Language", "langchain_text_splitters.Language"],
[
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain_text_splitters.RecursiveCharacterTextSplitter"
],
[
"langchain.text_splitter.RecursiveJsonSplitter",
"langchain_text_splitters.RecursiveJsonSplitter"
],
[
"langchain.text_splitter.LatexTextSplitter",
"langchain_text_splitters.LatexTextSplitter"
],
[
"langchain.text_splitter.PythonCodeTextSplitter",
"langchain_text_splitters.PythonCodeTextSplitter"
],
[
"langchain.text_splitter.KonlpyTextSplitter",
"langchain_text_splitters.KonlpyTextSplitter"
],
[
"langchain.text_splitter.SpacyTextSplitter",
"langchain_text_splitters.SpacyTextSplitter"
],
[
"langchain.text_splitter.NLTKTextSplitter",
"langchain_text_splitters.NLTKTextSplitter"
],
[
"langchain.text_splitter.split_text_on_tokens",
"langchain_text_splitters.split_text_on_tokens"
],
[
"langchain.text_splitter.SentenceTransformersTokenTextSplitter",
"langchain_text_splitters.SentenceTransformersTokenTextSplitter"
],
[
"langchain.text_splitter.ElementType",
"langchain_text_splitters.ElementType"
],
["langchain.text_splitter.HeaderType", "langchain_text_splitters.HeaderType"],
["langchain.text_splitter.LineType", "langchain_text_splitters.LineType"],
[
"langchain.text_splitter.HTMLHeaderTextSplitter",
"langchain_text_splitters.HTMLHeaderTextSplitter"
],
[
"langchain.text_splitter.MarkdownHeaderTextSplitter",
"langchain_text_splitters.MarkdownHeaderTextSplitter"
],
[
"langchain.text_splitter.MarkdownTextSplitter",
"langchain_text_splitters.MarkdownTextSplitter"
],
[
"langchain.text_splitter.CharacterTextSplitter",
"langchain_text_splitters.CharacterTextSplitter"
]
]
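
The JSON file above records the same migration as flat (old path, new path) pairs. A minimal sketch of consuming it as a lookup table; the file name here is an assumption:

import json

# Load the generated (old path, new path) pairs
with open("langchain_to_textsplitters.json") as f:
    pairs = json.load(f)

lookup = {old: new for old, new in pairs}
assert lookup["langchain.text_splitter.TokenTextSplitter"] == (
    "langchain_text_splitters.TokenTextSplitter"
)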

View File

@@ -0,0 +1,23 @@
language python
// This migration is generated automatically - do not manually edit this file
pattern langchain_migrate_openai() {
find_replace_imports(list=[
[`langchain_community.embeddings.openai`, `OpenAIEmbeddings`, `langchain_openai`, `OpenAIEmbeddings`],
[`langchain_community.embeddings.azure_openai`, `AzureOpenAIEmbeddings`, `langchain_openai`, `AzureOpenAIEmbeddings`],
[`langchain_community.chat_models.openai`, `ChatOpenAI`, `langchain_openai`, `ChatOpenAI`],
[`langchain_community.chat_models.azure_openai`, `AzureChatOpenAI`, `langchain_openai`, `AzureChatOpenAI`],
[`langchain_community.llms.openai`, `OpenAI`, `langchain_openai`, `OpenAI`],
[`langchain_community.llms.openai`, `AzureOpenAI`, `langchain_openai`, `AzureOpenAI`],
[`langchain_community.embeddings`, `AzureOpenAIEmbeddings`, `langchain_openai`, `AzureOpenAIEmbeddings`],
[`langchain_community.embeddings`, `OpenAIEmbeddings`, `langchain_openai`, `OpenAIEmbeddings`],
[`langchain_community.chat_models`, `AzureChatOpenAI`, `langchain_openai`, `AzureChatOpenAI`],
[`langchain_community.chat_models`, `ChatOpenAI`, `langchain_openai`, `ChatOpenAI`],
[`langchain_community.llms`, `AzureOpenAI`, `langchain_openai`, `AzureOpenAI`],
[`langchain_community.llms`, `OpenAI`, `langchain_openai`, `OpenAI`]
])
}
// Add this for invoking directly
langchain_migrate_openai()

View File

@@ -0,0 +1,13 @@
language python
// This migration is generated automatically - do not manually edit this file
pattern langchain_migrate_pinecone() {
find_replace_imports(list=[
[`langchain_community.vectorstores.pinecone`, `Pinecone`, `langchain_pinecone`, `Pinecone`],
[`langchain_community.vectorstores`, `Pinecone`, `langchain_pinecone`, `Pinecone`]
])
}
// Add this for invoking directly
langchain_migrate_pinecone()

View File

@@ -0,0 +1,36 @@
language python
// This migration is generated automatically - do not manually edit this file
pattern replace_pydantic_v1_shim() {
`from $IMPORT import $...` where {
or {
and {
$IMPORT <: or {
"langchain_core.pydantic_v1",
"langchain.pydantic_v1",
"langserve.pydantic_v1",
},
$IMPORT => `pydantic`
},
and {
$IMPORT <: or {
"langchain_core.pydantic_v1.dataclasses",
"langchain.pydantic_v1.dataclasses",
"langserve.pydantic_v1.dataclasses",
},
$IMPORT => `pydantic.dataclasses`
},
and {
$IMPORT <: or {
"langchain_core.pydantic_v1.main",
"langchain.pydantic_v1.main",
"langserve.pydantic_v1.main",
},
$IMPORT => `pydantic.main`
},
}
}
}
// Add this for invoking directly
replace_pydantic_v1_shim()
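
This pattern rewrites only the module path; the imported names are untouched. A sketch of the effect on user code:

# Before: importing through the pydantic v1 compatibility shim
from langchain_core.pydantic_v1 import BaseModel, Field

# After the pattern runs: importing from pydantic directly
from pydantic import BaseModel, Field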

View File

@@ -0,0 +1 @@
"""Migrations."""

View File

@@ -0,0 +1 @@
"""Generate migrations."""

View File

@@ -0,0 +1,182 @@
"""Generate migrations from langchain to langchain-community or core packages."""
import importlib
import inspect
import pkgutil
from types import ModuleType
def generate_raw_migrations(
from_package: str,
to_package: str,
filter_by_all: bool = False, # noqa: FBT001, FBT002
) -> list[tuple[str, str]]:
"""Scan the `langchain` package and generate migrations for all modules.
Args:
from_package: The package to migrate from.
to_package: The package to migrate to.
filter_by_all: Whether to only consider items in `__all__`.
Returns:
A list of tuples containing the original import path and the new import path.
"""
package = importlib.import_module(from_package)
items = []
for _importer, modname, _ispkg in pkgutil.walk_packages(
package.__path__,
package.__name__ + ".",
):
try:
module = importlib.import_module(modname)
except ModuleNotFoundError:
continue
# Check if the module is an __init__ file and evaluate __all__
try:
has_all = hasattr(module, "__all__")
except ImportError:
has_all = False
if has_all:
all_objects = module.__all__
for name in all_objects:
# Attempt to fetch each object declared in __all__
try:
obj = getattr(module, name, None)
except ImportError:
continue
if (
obj
and (inspect.isclass(obj) or inspect.isfunction(obj))
and obj.__module__.startswith(to_package)
):
items.append(
(f"{modname}.{name}", f"{obj.__module__}.{obj.__name__}"),
)
if not filter_by_all:
# Iterate over all members of the module
for name, obj in inspect.getmembers(module):
# Check if it's a class or function
# Check if the module name of the obj starts with
# 'langchain_community'
if inspect.isclass(obj) or (
inspect.isfunction(obj) and obj.__module__.startswith(to_package)
):
items.append(
(f"{modname}.{name}", f"{obj.__module__}.{obj.__name__}"),
)
return items
def generate_top_level_imports(pkg: str) -> list[tuple[str, str]]:
    """Look at all the top-level modules in a package such as langchain_community.

    Attempt to import everything from each `__init__` file. For example,

    langchain_community/
        chat_models/
            __init__.py  # <-- import everything from here
        llm/
            __init__.py  # <-- import everything from here

    It collects all the imports, importing the classes / functions it can find
    there, and returns a list of 2-tuples.

    Each tuple contains the fully qualified path of the class / function where
    its logic is defined
    (e.g., `langchain_community.chat_models.xyz_implementation.ver2.XYZ`)
    and the path for importing it from the top-level namespace
    (e.g., `langchain_community.chat_models.XYZ`).

    Args:
        pkg: The package to scan.

    Returns:
        A list of tuples containing the fully qualified path and the top-level
        import path.
    """
    package = importlib.import_module(pkg)
    items = []

    # Function to handle importing from modules
    def handle_module(module: ModuleType, module_name: str) -> None:
        if hasattr(module, "__all__"):
            all_objects = module.__all__
            for name in all_objects:
                # Attempt to fetch each object declared in __all__
                obj = getattr(module, name, None)
                if obj and (inspect.isclass(obj) or inspect.isfunction(obj)):
                    # Capture the fully qualified name of the object
                    original_module = obj.__module__
                    original_name = obj.__name__
                    # Form the new import path from the top-level namespace
                    top_level_import = f"{module_name}.{name}"
                    # Append the tuple with original and top-level paths
                    items.append(
                        (f"{original_module}.{original_name}", top_level_import),
                    )

    # Handle the package itself (root level)
    handle_module(package, pkg)

    # Only iterate through top-level modules/packages
    for _finder, modname, ispkg in pkgutil.iter_modules(
        package.__path__,
        package.__name__ + ".",
    ):
        if ispkg:
            try:
                module = importlib.import_module(modname)
                handle_module(module, modname)
            except ModuleNotFoundError:
                continue

    return items
def generate_simplified_migrations(
from_package: str,
to_package: str,
filter_by_all: bool = True, # noqa: FBT001, FBT002
) -> list[tuple[str, str]]:
"""Get all the raw migrations, then simplify them if possible.
Args:
from_package: The package to migrate from.
to_package: The package to migrate to.
filter_by_all: Whether to only consider items in `__all__`.
Returns:
A list of tuples containing the original import path and the simplified
import path.
"""
raw_migrations = generate_raw_migrations(
from_package,
to_package,
filter_by_all=filter_by_all,
)
top_level_simplifications = generate_top_level_imports(to_package)
top_level_dict = dict(top_level_simplifications)
simple_migrations = []
for migration in raw_migrations:
original, new = migration
replacement = top_level_dict.get(new, new)
simple_migrations.append((original, replacement))
# Now let's deduplicate the list based on the original path (which is
# the 1st element of the tuple)
deduped_migrations = []
seen = set()
for migration in simple_migrations:
original = migration[0]
if original not in seen:
deduped_migrations.append(migration)
seen.add(original)
return deduped_migrations
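# Illustrative only (an editor's sketch, not part of the original module):
# wiring the helpers above together. Assumes both packages are importable in
# the current environment.
if __name__ == "__main__":
    pairs = generate_simplified_migrations("langchain", "langchain_community")
    for old_path, new_path in pairs[:5]:
        print(f"{old_path} -> {new_path}")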

View File

@@ -0,0 +1,53 @@
"""Migration as Grit file."""
def split_package(package: str) -> tuple[str, str]:
"""Split a package name into the containing package and the final name.
Args:
package: The full package name.
Returns:
A tuple of `(containing_package, final_name)`.
"""
parts = package.split(".")
return ".".join(parts[:-1]), parts[-1]
def dump_migrations_as_grit(name: str, migration_pairs: list[tuple[str, str]]) -> str:
"""Dump the migration pairs as a Grit file.
Args:
name: The name of the migration.
migration_pairs: A list of tuples `(from_module, to_module)`.
Returns:
The Grit file as a string.
"""
remapped = ",\n".join(
[
f"""
[
`{split_package(from_module)[0]}`,
`{split_package(from_module)[1]}`,
`{split_package(to_module)[0]}`,
`{split_package(to_module)[1]}`
]
"""
for from_module, to_module in migration_pairs
],
)
pattern_name = f"langchain_migrate_{name}"
return f"""
language python
// This migration is generated automatically - do not manually edit this file
pattern {pattern_name}() {{
find_replace_imports(list=[
{remapped}
])
}}
// Add this for invoking directly
{pattern_name}()
"""

View File

@@ -0,0 +1,53 @@
"""Generate migrations for partner packages."""
import importlib
from langchain_core.documents import BaseDocumentCompressor, BaseDocumentTransformer
from langchain_core.embeddings import Embeddings
from langchain_core.language_models import BaseLanguageModel
from langchain_core.retrievers import BaseRetriever
from langchain_core.vectorstores import VectorStore
from langchain_cli.namespaces.migrate.generate.utils import (
COMMUNITY_PKG,
find_subclasses_in_module,
list_classes_by_package,
list_init_imports_by_package,
)
# PUBLIC API
def get_migrations_for_partner_package(pkg_name: str) -> list[tuple[str, str]]:
"""Generate migrations from community package to partner package.
This code works
Args:
pkg_name: The name of the partner package.
Returns:
List of 2-tuples containing old and new import paths.
"""
package = importlib.import_module(pkg_name)
classes_ = find_subclasses_in_module(
package,
[
BaseLanguageModel,
Embeddings,
BaseRetriever,
VectorStore,
BaseDocumentTransformer,
BaseDocumentCompressor,
],
)
community_classes = list_classes_by_package(str(COMMUNITY_PKG))
imports_for_pkg = list_init_imports_by_package(str(COMMUNITY_PKG))
old_paths = community_classes + imports_for_pkg
return [
(f"{module}.{item}", f"{pkg_name}.{item}")
for module, item in old_paths
if item in classes_
]
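# Illustrative only (an editor's sketch, not part of the original module):
# the partner package must be installed for the subclass scan above to find
# anything. Yields pairs such as
# ("langchain_community.chat_models.openai.ChatOpenAI",
#  "langchain_openai.ChatOpenAI").
if __name__ == "__main__":
    for old, new in get_migrations_for_partner_package("langchain_openai"):
        print(old, "->", new)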

View File

@@ -0,0 +1,222 @@
"""Generate migrations utilities."""
import ast
import inspect
import os
import pathlib
from pathlib import Path
from types import ModuleType
from typing_extensions import override
HERE = Path(__file__).parent
# Should bring us to [root]/src
PKGS_ROOT = HERE.parent.parent.parent.parent.parent
LANGCHAIN_PKG = PKGS_ROOT / "langchain"
COMMUNITY_PKG = PKGS_ROOT / "community"
PARTNER_PKGS = PKGS_ROOT / "partners"
class ImportExtractor(ast.NodeVisitor):
"""Import extractor."""
def __init__(self, *, from_package: str | None = None) -> None:
"""Extract all imports from the given code, optionally filtering by package."""
self.imports: list[tuple[str, str]] = []
self.package = from_package
@override
def visit_ImportFrom(self, node: ast.ImportFrom) -> None:
if node.module and (
self.package is None or str(node.module).startswith(self.package)
):
for alias in node.names:
self.imports.append((node.module, alias.name))
self.generic_visit(node)
def _get_class_names(code: str) -> list[str]:
"""Extract class names from a code string."""
# Parse the content of the file into an AST
tree = ast.parse(code)
# Initialize a list to hold all class names
class_names = []
# Define a node visitor class to collect class names
class ClassVisitor(ast.NodeVisitor):
@override
def visit_ClassDef(self, node: ast.ClassDef) -> None:
class_names.append(node.name)
self.generic_visit(node)
# Create an instance of the visitor and visit the AST
visitor = ClassVisitor()
visitor.visit(tree)
return class_names
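# e.g. _get_class_names("class Foo:\n    class Bar: ...") -> ["Foo", "Bar"];
# the visitor recurses, so nested classes are collected too.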
def is_subclass(class_obj: type, classes_: list[type]) -> bool:
"""Check if the given class object is a subclass of any class in list classes.
Args:
class_obj: The class to check.
classes_: A list of classes to check against.
Returns:
True if `class_obj` is a subclass of any class in `classes_`, `False` otherwise.
"""
return any(
issubclass(class_obj, kls)
for kls in classes_
if inspect.isclass(class_obj) and inspect.isclass(kls)
)
def find_subclasses_in_module(module: ModuleType, classes_: list[type]) -> list[str]:
"""Find all classes in the module that inherit from one of the classes.
Args:
module: The module to inspect.
classes_: A list of classes to check against.
Returns:
A list of class names that are subclasses of any class in `classes_`.
"""
subclasses = []
# Iterate over all attributes of the module that are classes
for _name, obj in inspect.getmembers(module, inspect.isclass):
if is_subclass(obj, classes_):
subclasses.append(obj.__name__)
return subclasses
def _get_all_classnames_from_file(file: Path, pkg: str) -> list[tuple[str, str]]:
"""Extract all class names from a file."""
code = Path(file).read_text(encoding="utf-8")
module_name = _get_current_module(file, pkg)
class_names = _get_class_names(code)
return [(module_name, class_name) for class_name in class_names]
def identify_all_imports_in_file(
file: str,
*,
from_package: str | None = None,
) -> list[tuple[str, str]]:
"""Identify all the imports in the given file.
Args:
file: The file to analyze.
from_package: If provided, only return imports from this package.
Returns:
A list of tuples `(module, name)` representing the imports found in the file.
"""
code = Path(file).read_text(encoding="utf-8")
return find_imports_from_package(code, from_package=from_package)
def identify_pkg_source(pkg_root: str) -> pathlib.Path:
"""Identify the source of the package.
Args:
        pkg_root: the root of the package. This contains source + tests, and
            other things like pyproject.toml, lock files, etc.
Returns:
Returns the path to the source code for the package.
Raises:
ValueError: If there is not exactly one directory starting with `'langchain_'`
in the package root.
"""
dirs = [d for d in Path(pkg_root).iterdir() if d.is_dir()]
matching_dirs = [d for d in dirs if d.name.startswith("langchain_")]
if len(matching_dirs) != 1:
msg = "There should be only one langchain package."
raise ValueError(msg)
return matching_dirs[0]
def list_classes_by_package(pkg_root: str) -> list[tuple[str, str]]:
"""List all classes in a package.
Args:
pkg_root: the root of the package.
Returns:
A list of tuples `(module, class_name)` representing all classes found in the
package, excluding test files.
"""
module_classes = []
pkg_source = identify_pkg_source(pkg_root)
files = list(pkg_source.rglob("*.py"))
for file in files:
rel_path = os.path.relpath(file, pkg_root)
if rel_path.startswith("tests"):
continue
module_classes.extend(_get_all_classnames_from_file(file, pkg_root))
return module_classes
def list_init_imports_by_package(pkg_root: str) -> list[tuple[str, str]]:
"""List all the things that are being imported in a package by module.
Args:
pkg_root: the root of the package.
Returns:
A list of tuples `(module, name)` representing the imports found in
`__init__.py` files.
"""
imports = []
pkg_source = identify_pkg_source(pkg_root)
# Scan all the files in the package
files = list(Path(pkg_source).rglob("*.py"))
for file in files:
if file.name != "__init__.py":
continue
import_in_file = identify_all_imports_in_file(str(file))
module_name = _get_current_module(file, pkg_root)
imports.extend([(module_name, item) for _, item in import_in_file])
return imports
def find_imports_from_package(
code: str,
*,
from_package: str | None = None,
) -> list[tuple[str, str]]:
"""Find imports in code.
Args:
code: The code to analyze.
from_package: If provided, only return imports from this package.
Returns:
A list of tuples `(module, name)` representing the imports found.
"""
# Parse the code into an AST
tree = ast.parse(code)
# Create an instance of the visitor
extractor = ImportExtractor(from_package=from_package)
# Use the visitor to update the imports list
extractor.visit(tree)
return extractor.imports
def _get_current_module(path: Path, pkg_root: str) -> str:
"""Convert a path to a module name."""
relative_path = path.relative_to(pkg_root).with_suffix("")
posix_path = relative_path.as_posix()
norm_path = os.path.normpath(str(posix_path))
fully_qualified_module = norm_path.replace("/", ".")
# Strip __init__ if present
if fully_qualified_module.endswith(".__init__"):
return fully_qualified_module[:-9]
return fully_qualified_module
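# Illustrative only (an editor's sketch, not part of the original module):
# the AST-based import scan in action.
if __name__ == "__main__":
    sample = "from langchain_community.llms import OpenAI\nimport os\n"
    print(find_imports_from_package(sample, from_package="langchain_community"))
    # -> [('langchain_community.llms', 'OpenAI')]; plain `import os` is
    # ignored because only `from ... import ...` nodes are visited.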

View File

@@ -0,0 +1,74 @@
"""Migrate LangChain to the most recent version."""
from pathlib import Path
import rich
import typer
from gritql import run # type: ignore[import-untyped]
from typer import Option
def get_gritdir_path() -> Path:
"""Get the path to the grit directory."""
script_dir = Path(__file__).parent
return script_dir / ".grit"
def migrate(
ctx: typer.Context,
# Using diff instead of dry-run for backwards compatibility with the old CLI
diff: bool = Option( # noqa: FBT001
False, # noqa: FBT003
"--diff",
help="Show the changes that would be made without applying them.",
),
interactive: bool = Option( # noqa: FBT001
False, # noqa: FBT003
"--interactive",
help="Prompt for confirmation before making each change",
),
) -> None:
"""Migrate langchain to the most recent version.
Any undocumented arguments will be passed to the Grit CLI.
"""
rich.print(
"✈️ This script will help you migrate to a LangChain 0.3. "
"This migration script will attempt to replace old imports in the code "
"with new ones. "
"If you need to migrate to LangChain 0.2, please downgrade to version 0.0.29 "
"of the langchain-cli.\n\n"
"🔄 You will need to run the migration script TWICE to migrate (e.g., "
"to update llms import from langchain, the script will first move them to "
"corresponding imports from the community package, and on the second "
"run will migrate from the community package to the partner package "
"when possible). \n\n"
"🔍 You can pre-view the changes by running with the --diff flag. \n\n"
"🚫 You can disable specific import changes by using the --disable "
"flag. \n\n"
"📄 Update your pyproject.toml or requirements.txt file to "
"reflect any imports from new packages. For example, if you see new "
"imports from langchain_openai, langchain_anthropic or "
"langchain_text_splitters you "
"should add them to your dependencies! \n\n"
'⚠️ This script is a "best-effort", and is likely to make some '
"mistakes.\n\n"
"🛡️ Backup your code prior to running the migration script -- it will "
"modify your files!\n\n",
)
rich.print("-" * 10)
rich.print()
args = list(ctx.args)
if interactive:
args.append("--interactive")
if diff:
args.append("--dry-run")
final_code = run.apply_pattern(
"langchain_all_migrations()",
args,
grit_dir=str(get_gritdir_path()),
)
raise typer.Exit(code=final_code)
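# Hypothetical CLI usage (an editor's sketch, assuming the package's entry
# point mounts this command as `migrate`):
#
#     langchain-cli migrate --diff ./my-project   # preview changes only
#     langchain-cli migrate ./my-project          # apply (run twice; see above)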

View File

@@ -0,0 +1,147 @@
"""Develop installable templates."""
import re
import shutil
import subprocess
from pathlib import Path
from typing import Annotated
import typer
import uvicorn
from langchain_cli.utils.github import list_packages
from langchain_cli.utils.packages import get_langserve_export, get_package_root
package_cli = typer.Typer(no_args_is_help=True, add_completion=False)
@package_cli.command()
def new(
name: Annotated[str, typer.Argument(help="The name of the folder to create")],
with_poetry: Annotated[ # noqa: FBT002
bool,
typer.Option("--with-poetry/--no-poetry", help="Don't run poetry install"),
] = False,
) -> None:
"""Create a new template package."""
computed_name = name if name != "." else Path.cwd().name
destination_dir = Path.cwd() / name if name != "." else Path.cwd()
# copy over template from ../package_template
project_template_dir = Path(__file__).parents[1] / "package_template"
shutil.copytree(project_template_dir, destination_dir, dirs_exist_ok=name == ".")
package_name_split = computed_name.split("/")
package_name = (
package_name_split[-2]
if len(package_name_split) > 1 and not package_name_split[-1]
else package_name_split[-1]
)
module_name = re.sub(
r"[^a-zA-Z0-9_]",
"_",
package_name,
)
# generate app route code
chain_name = f"{module_name}_chain"
app_route_code = (
f"from {module_name} import chain as {chain_name}\n\n"
f'add_routes(app, {chain_name}, path="/{package_name}")'
)
# replace template strings
pyproject = destination_dir / "pyproject.toml"
pyproject_contents = pyproject.read_text()
pyproject.write_text(
pyproject_contents.replace("__package_name__", package_name).replace(
"__module_name__",
module_name,
),
)
# move module folder
package_dir = destination_dir / module_name
shutil.move(destination_dir / "package_template", package_dir)
# update init
init = package_dir / "__init__.py"
init_contents = init.read_text()
init.write_text(init_contents.replace("__module_name__", module_name))
# replace readme
readme = destination_dir / "README.md"
readme_contents = readme.read_text()
readme.write_text(
readme_contents.replace("__package_name__", package_name).replace(
"__app_route_code__",
app_route_code,
),
)
# poetry install
if with_poetry:
subprocess.run(["poetry", "install"], cwd=destination_dir, check=True) # noqa: S607
@package_cli.command()
def serve(
*,
port: Annotated[
int | None,
typer.Option(help="The port to run the server on"),
] = None,
host: Annotated[
str | None,
typer.Option(help="The host to run the server on"),
] = None,
configurable: Annotated[
bool | None,
typer.Option(
"--configurable/--no-configurable",
help="Whether to include a configurable route",
),
] = None, # defaults to `not chat_playground`
chat_playground: Annotated[
bool,
typer.Option(
"--chat-playground/--no-chat-playground",
help="Whether to include a chat playground route",
),
] = False,
) -> None:
"""Start a demo app for this template."""
# load pyproject.toml
project_dir = get_package_root()
pyproject = project_dir / "pyproject.toml"
# get langserve export - throws KeyError if invalid
get_langserve_export(pyproject)
host_str = host if host is not None else "127.0.0.1"
script = (
"langchain_cli.dev_scripts:create_demo_server_chat"
if chat_playground
else (
"langchain_cli.dev_scripts:create_demo_server_configurable"
if configurable
else "langchain_cli.dev_scripts:create_demo_server"
)
)
uvicorn.run(
script,
factory=True,
reload=True,
port=port if port is not None else 8000,
host=host_str,
)
@package_cli.command()
def list(contains: Annotated[str | None, typer.Argument()] = None) -> None: # noqa: A001
"""List all or search for available templates."""
packages = list_packages(contains=contains)
for package in packages:
typer.echo(package)
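# Hypothetical CLI usage (an editor's sketch, assuming this Typer app is
# mounted as the `template` sub-command of the `langchain` CLI):
#
#     langchain template new my-pirate-app
#     langchain template serve --port 8001 --chat-playground
#     langchain template list openai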

View File

@@ -0,0 +1 @@
__pycache__

View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2024 LangChain, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -0,0 +1,64 @@
# __package_name__
TODO: What does this package do
## Environment Setup
TODO: What environment variables need to be set (if any)
## Usage
To use this package, you should first have the LangChain CLI installed:
```shell
pip install -U langchain-cli
```
To create a new LangChain project and install this as the only package, you can do:
```shell
langchain app new my-app --package __package_name__
```
If you want to add this to an existing project, you can just run:
```shell
langchain app add __package_name__
```
And add the following code to your `server.py` file:
```python
__app_route_code__
```
(Optional) Let's now configure LangSmith.
LangSmith will help us trace, monitor and debug LangChain applications.
You can sign up for LangSmith [here](https://smith.langchain.com/).
If you don't have access, you can skip this section.
```shell
export LANGSMITH_TRACING=true
export LANGSMITH_API_KEY=<your-api-key>
export LANGSMITH_PROJECT=<your-project> # if not specified, defaults to "default"
```
If you are inside this directory, then you can spin up a LangServe instance directly by:
```shell
langchain serve
```
This will start the FastAPI app with a server running locally at
[http://localhost:8000](http://localhost:8000)
We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/__package_name__/playground](http://127.0.0.1:8000/__package_name__/playground)
We can access the template from code with:
```python
from langserve.client import RemoteRunnable
runnable = RemoteRunnable("http://localhost:8000/__package_name__")
```

View File

@@ -0,0 +1,5 @@
"""__module_name__ module."""
from __module_name__.chain import chain
__all__ = ["chain"]

View File

@@ -0,0 +1,19 @@
"""Chain definition."""
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
_prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You are a helpful assistant who speaks like a pirate",
),
("human", "{text}"),
]
)
_model = ChatOpenAI()
# if you update this, you MUST also update ../pyproject.toml
# with the new `tool.langserve.export_attr`
chain = _prompt | _model
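# Illustrative only (an editor's sketch, not part of the template itself):
# exercising the chain locally. Assumes OPENAI_API_KEY is set in the
# environment.
if __name__ == "__main__":
    result = chain.invoke({"text": "What is LangChain?"})
    print(result.content)  # the model's pirate-flavored reply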

View File

@@ -0,0 +1,24 @@
[build-system]
requires = [ "poetry-core",]
build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "__package_name__"
version = "0.0.1"
description = ""
authors = []
readme = "README.md"
[tool.langserve]
export_module = "__module_name__"
export_attr = "chain"
[tool.poetry.dependencies]
python = ">=3.9,<4.0"
langchain-core = "^0.3.15"
langchain-openai = ">=0.0.1"
[tool.poetry.group.dev.dependencies]
langchain-cli = ">=0.0.4"
fastapi = "^0.104.0"
sse-starlette = "^1.6.5"

Some files were not shown because too many files have changed in this diff