Mirror of https://github.com/hwchase17/langchain.git, synced 2026-02-10 19:20:24 +00:00
Compare commits: wfh/speedy ... v04_standa (128 commits)
128 commits (SHA1):

5de65de99a, 20b80a1ef8, b5462b8979, 20a6bdf510, 36dbb20da7, 3768bb1d58, 8d85a25f87, 6594eb8cc1,
060fc0e3c9, 4d9c0b0883, a8998a1f57, 042da2a2b2, c026a71a06, 059942b5fc, bc5e8e0c17, 8064d3bdc4,
791c0e2e8f, 0002b1dafa, 93c1aeebb6, ac23607a61, 59b12f7e46, f33a25773e, 1f829aacf4, 4d9eefecab,
e7f1ceee67, 71b361936d, cbb418b4bf, ae210c1590, 5b3e29f809, 0a17a62548, a1c1421bf4, 83d8be756a,
3f839d566a, 65b098325b, 4e513539f8, b8e2420865, a3e3fd20f2, 2c7eafffec, dd76209bbd, 750721b4c3,
06ab2972e3, 63e3f2dea6, 231e8d0f43, 38bd1abb8c, 2a7645300c, e7eac27241, 706a66eccd, e686a70ee0,
0eb10f31c1, 47d330f4e6, 4215261be1, a751a23c4e, 8ef01f8444, 6c23c711fb, 03e8327e01, ba144c9d7f,
ed35372580, 56bbfd9723, fceebbb387, 4134b36db8, 9368b92b2c, 2df3fdf40d, f06380516f, 49c316667d,
1276bf3e1d, 53c75abba2, a46a2b8bda, 451c90fefa, 8aed3b61a9, 73552883c3, 013ce2c47f, e934788ca2,
bf05229029, 3f4b355eef, 46fe09f013, df5cc024fd, a15c3e0856, e1eb3f8d6f, 64815445e4, 15dc684d34,
8bdb1de006, b26d2250ba, 6a5073b227, df06041eb2, ade642b7c5, c9f45dc323, f88fff0b8a, 7cb9388c33,
21664985c7, b140d16696, 2090f85789, 572020c4d8, 1a3a8db3c9, ee3709535d, b8e9b4adfc, cd7dce687a,
802d2bf249, 911b0b69ea, 10ec5c8f02, 6dca787a9d, 46745f91b5, 181c22c512, 43eef43550, 815d11ed6a,
73fefe0295, 377e5f5204, eb12294583, 86a698d1b6, b03e326231, 3190c4132f, f30fe07620, d0dce5315f,
c9e1ce2966, 404d8408f4, 0279af60b5, 425ee52581, 0efaa483e4, 479b6fd7c5, 625f7c3710, 2d3020f6cd,
33c9bf1adc, 645e25f624, 247673ddb8, 1a5120dc9d, 6572399174, 04cc674e80, 46cef90f7b, 428c276948
.github/CODEOWNERS (vendored): 3 changes

@@ -1,2 +1,3 @@
-/.github/ @baskaryan @ccurme
+/.github/ @baskaryan @ccurme @eyurtsev
 /libs/core/ @eyurtsev
+/libs/packages.yml @ccurme

.github/ISSUE_TEMPLATE/config.yml (vendored): 6 changes

@@ -1,9 +1,9 @@
 blank_issues_enabled: false
 version: 2.1
 contact_links:
-  - name: 🤔 Question or Problem
-    about: Ask a question or ask about a problem in GitHub Discussions.
-    url: https://www.github.com/langchain-ai/langchain/discussions/categories/q-a
+  - name: 🤔 Question
+    about: Ask a question in the LangChain forums
+    url: https://forum.langchain.com/c/help/langchain/14
   - name: Feature Request
     url: https://www.github.com/langchain-ai/langchain/discussions/categories/ideas
     about: Suggest a feature or an idea

@@ -12,6 +12,9 @@ on:
         type: string
         description: "Python version to use"
 
+permissions:
+  contents: read
+
 env:
   UV_FROZEN: "true"

.github/workflows/_integration_test.yml (vendored): 5 changes

@@ -1,4 +1,4 @@
-name: Integration tests
+name: Integration Tests
 
 on:
   workflow_dispatch:
@@ -12,6 +12,9 @@ on:
         type: string
         description: "Python version to use"
 
+permissions:
+  contents: read
+
 env:
   UV_FROZEN: "true"

.github/workflows/_lint.yml (vendored): 3 changes

@@ -12,6 +12,9 @@ on:
         type: string
         description: "Python version to use"
 
+permissions:
+  contents: read
+
 env:
   WORKDIR: ${{ inputs.working-directory == '' && '.' || inputs.working-directory }}

.github/workflows/_release.yml (vendored): 6 changes

@@ -1,4 +1,4 @@
-name: release
+name: Release
 run-name: Release ${{ inputs.working-directory }} by @${{ github.actor }}
 on:
   workflow_call:
@@ -64,7 +64,7 @@ jobs:
           name: dist
           path: ${{ inputs.working-directory }}/dist/
 
-      - name: Check Version
+      - name: Check version
         id: check-version
         shell: python
         working-directory: ${{ inputs.working-directory }}
@@ -93,7 +93,7 @@ jobs:
             ${{ inputs.working-directory }}
           ref: ${{ github.ref }} # this scopes to just ref'd branch
           fetch-depth: 0 # this fetches entire commit history
-      - name: Check Tags
+      - name: Check tags
         id: check-tags
         shell: bash
         working-directory: langchain/${{ inputs.working-directory }}

.github/workflows/_test.yml (vendored): 3 changes

@@ -12,6 +12,9 @@ on:
         type: string
         description: "Python version to use"
 
+permissions:
+  contents: read
+
 env:
   UV_FROZEN: "true"
   UV_NO_SYNC: "true"

.github/workflows/_test_doc_imports.yml (vendored): 3 changes

@@ -8,6 +8,9 @@ on:
         type: string
         description: "Python version to use"
 
+permissions:
+  contents: read
+
 env:
   UV_FROZEN: "true"

.github/workflows/_test_pydantic.yml (vendored): 3 changes

@@ -17,6 +17,9 @@ on:
         type: string
         description: "Pydantic version to test."
 
+permissions:
+  contents: read
+
 env:
   UV_FROZEN: "true"
   UV_NO_SYNC: "true"

.github/workflows/api_doc_build.yml (vendored): 6 changes

@@ -1,4 +1,4 @@
-name: API docs build
+name: API Docs Build
 
 on:
   workflow_dispatch:
@@ -56,7 +56,7 @@ jobs:
           git clone --depth 1 https://github.com/$repo.git $REPO_NAME
         done
 
-      - name: Setup python ${{ env.PYTHON_VERSION }}
+      - name: Setup Python ${{ env.PYTHON_VERSION }}
        uses: actions/setup-python@v5
        id: setup-python
        with:
@@ -68,7 +68,7 @@ jobs:
           python -m pip install -U uv
           python -m uv pip install --upgrade --no-cache-dir pip setuptools pyyaml
 
-      - name: Move libs with script
+      - name: Move libs
        run: python langchain/.github/scripts/prep_api_docs_build.py
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/check-broken-links.yml (vendored): 3 changes

@@ -5,6 +5,9 @@ on:
   schedule:
     - cron: '0 13 * * *'
 
+permissions:
+  contents: read
+
 jobs:
   check-links:
     if: github.repository_owner == 'langchain-ai' || github.event_name != 'schedule'

.github/workflows/check_core_versions.yml (vendored): 5 changes

@@ -1,4 +1,4 @@
-name: Check `langchain-core` version equality
+name: Check `core` Version Equality
 
 on:
   pull_request:
@@ -6,6 +6,9 @@ on:
       - 'libs/core/pyproject.toml'
       - 'libs/core/langchain_core/version.py'
 
+permissions:
+  contents: read
+
 jobs:
   check_version_equality:
     runs-on: ubuntu-latest

.github/workflows/check_diffs.yml (vendored): 3 changes

@@ -16,6 +16,9 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true
 
+permissions:
+  contents: read
+
 env:
   UV_FROZEN: "true"
   UV_NO_SYNC: "true"

.github/workflows/check_new_docs.yml (vendored): 5 changes

@@ -1,4 +1,4 @@
-name: Integration docs lint
+name: Integration Docs Lint
 
 on:
   push:
@@ -15,6 +15,9 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true
 
+permissions:
+  contents: read
+
 jobs:
   build:
     runs-on: ubuntu-latest

.github/workflows/codspeed.yml (vendored): 3 changes

@@ -7,6 +7,9 @@ on:
   pull_request:
   workflow_dispatch:
 
+permissions:
+  contents: read
+
 env:
   AZURE_OPENAI_CHAT_DEPLOYMENT_NAME: foo
   AZURE_OPENAI_LEGACY_CHAT_DEPLOYMENT_NAME: foo

.github/workflows/people.yml (vendored): 5 changes

@@ -11,7 +11,8 @@ jobs:
   langchain-people:
     if: github.repository_owner == 'langchain-ai' || github.event_name != 'schedule'
     runs-on: ubuntu-latest
-    permissions: write-all
+    permissions:
+      contents: write
     steps:
       - name: Dump GitHub context
         env:
@@ -23,4 +24,4 @@ jobs:
         run: mkdir -p /home/runner/work/_temp/_github_home && printf "[safe]\n\tdirectory = /github/workspace" > /home/runner/work/_temp/_github_home/.gitconfig
       - uses: ./.github/actions/people
         with:
-          token: ${{ secrets.LANGCHAIN_PEOPLE_GITHUB_TOKEN }}
+          token: ${{ secrets.LANGCHAIN_PEOPLE_GITHUB_TOKEN }}

.github/workflows/pr_lint.yml (vendored, new file): 111 lines

@@ -0,0 +1,111 @@
# -----------------------------------------------------------------------------
# PR Title Lint Workflow
#
# Purpose:
# Enforces Conventional Commits format for pull request titles to maintain a
# clear, consistent, and machine-readable change history across our repository.
#
# Enforced Commit Message Format (Conventional Commits 1.0.0):
#   <type>[optional scope]: <description>
#   [optional body]
#   [optional footer(s)]
#
# Allowed Types:
#   • feat — a new feature (MINOR bump)
#   • fix — a bug fix (PATCH bump)
#   • docs — documentation only changes
#   • style — formatting, missing semi-colons, etc.; no code change
#   • refactor — code change that neither fixes a bug nor adds a feature
#   • perf — code change that improves performance
#   • test — adding missing tests or correcting existing tests
#   • build — changes that affect the build system or external dependencies
#   • ci — continuous integration/configuration changes
#   • chore — other changes that don't modify src or test files
#   • revert — reverts a previous commit
#   • release — prepare a new release
#
# Allowed Scopes (optional):
#   core, cli, langchain, standard-tests, docs, anthropic, chroma, deepseek,
#   exa, fireworks, groq, huggingface, mistralai, nomic, ollama, openai,
#   perplexity, prompty, qdrant, xai
#
# Rules & Tips for New Committers:
#   1. Subject (type) must start with a lowercase letter and, if possible, be
#      followed by a scope wrapped in parenthesis `(scope)`
#   2. Breaking changes:
#      – Append "!" after type/scope (e.g., feat!: drop Node 12 support)
#      – Or include a footer "BREAKING CHANGE: <details>"
#   3. Example PR titles:
#      feat(core): add multi‐tenant support
#      fix(cli): resolve flag parsing error
#      docs: update API usage examples
#      docs(openai): update API usage examples
#
# Resources:
#   • Conventional Commits spec: https://www.conventionalcommits.org/en/v1.0.0/
# -----------------------------------------------------------------------------

name: PR Title Lint

permissions:
  pull-requests: read

on:
  pull_request:
    types: [opened, edited, synchronize]

jobs:
  lint-pr-title:
    name: Validate PR Title
    runs-on: ubuntu-latest
    steps:
      - name: Validate PR Title
        uses: amannn/action-semantic-pull-request@v5
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          types: |
            feat
            fix
            docs
            style
            refactor
            perf
            test
            build
            ci
            chore
            revert
            release
          scopes: |
            core
            cli
            langchain
            standard-tests
            docs
            anthropic
            chroma
            deepseek
            exa
            fireworks
            groq
            huggingface
            mistralai
            nomic
            ollama
            openai
            perplexity
            prompty
            qdrant
            xai
          requireScope: false
          disallowScopes: |
            release
            [A-Z]+
          subjectPattern: ^(?![A-Z]).+$
          subjectPatternError: |
            The subject "{subject}" found in the pull request title "{title}"
            didn't match the configured pattern. Please ensure that the subject
            doesn't start with an uppercase character.
          ignoreLabels: |
            ignore-lint-pr-title

.github/workflows/run_notebooks.yml (vendored): 5 changes

@@ -1,4 +1,4 @@
-name: Run notebooks
+name: Run Notebooks
 
 on:
   workflow_dispatch:
@@ -14,6 +14,9 @@ on:
   schedule:
     - cron: '0 13 * * *'
 
+permissions:
+  contents: read
+
 env:
   UV_FROZEN: "true"

.github/workflows/scheduled_test.yml (vendored): 7 changes

@@ -1,4 +1,4 @@
-name: Scheduled tests
+name: Scheduled Tests
 
 on:
   workflow_dispatch: # Allows to trigger the workflow manually in GitHub UI
@@ -12,6 +12,9 @@ on:
   schedule:
     - cron: '0 13 * * *'
 
+permissions:
+  contents: read
+
 env:
   POETRY_VERSION: "1.8.4"
   UV_FROZEN: "true"
@@ -159,7 +162,7 @@ jobs:
             langchain/libs/partners/google-vertexai \
             langchain/libs/partners/aws
 
-      - name: Ensure the tests did not create any additional files
+      - name: Ensure tests did not create additional files
        working-directory: langchain
        run: |
          set -eu

Makefile: 3 changes

@@ -71,7 +71,6 @@ spell_fix:
 lint lint_package lint_tests:
 	uv run --group lint ruff check docs cookbook
 	uv run --group lint ruff format docs cookbook cookbook --diff
-	uv run --group lint ruff check --select I docs cookbook
 	git --no-pager grep 'from langchain import' docs cookbook | grep -vE 'from langchain import (hub)' && echo "Error: no importing langchain from root in docs, except for hub" && exit 1 || exit 0
 
 	git --no-pager grep 'api.python.langchain.com' -- docs/docs ':!docs/docs/additional_resources/arxiv_references.mdx' ':!docs/docs/integrations/document_loaders/sitemap.ipynb' || exit 0 && \
@@ -81,7 +80,7 @@ lint lint_package lint_tests:
 ## format: Format the project files.
 format format_diff:
 	uv run --group lint ruff format docs cookbook
-	uv run --group lint ruff check --select I --fix docs cookbook
+	uv run --group lint ruff check --fix docs cookbook
 
 update-package-downloads:
 	uv run python docs/scripts/packages_yml_get_downloads.py

(Seven deleted files follow, each `@@ -1 +0,0 @@` removing a single line of compressed, base64-encoded data; the contents are not human-readable and are omitted.)

@@ -135,11 +135,11 @@ docs = vectorstore.similarity_search(query)
 
 Many vectorstores support search parameters to be passed with the `similarity_search` method. See the documentation for the specific vectorstore you are using to see what parameters are supported.
-As an example [Pinecone](https://python.langchain.com/api_reference/pinecone/vectorstores/langchain_pinecone.vectorstores.PineconeVectorStore.html#langchain_pinecone.vectorstores.PineconeVectorStore.similarity_search) several parameters that are important general concepts:
-Many vectorstores support [the `k`](/docs/integrations/vectorstores/pinecone/#query-directly), which controls the number of Documents to return, and `filter`, which allows for filtering documents by metadata.
+Many vectorstores support [the `k`](/docs/integrations/vectorstores/pinecone/#query-directly), which controls the number of documents to return, and `filter`, which allows for filtering documents by metadata.
 
-- `query (str) – Text to look up documents similar to.`
-- `k (int) – Number of Documents to return. Defaults to 4.`
-- `filter (dict | None) – Dictionary of argument(s) to filter on metadata`
+- `query (str) - Text to look up documents similar to.`
+- `k (int) - Number of documents to return. Defaults to 4.`
+- `filter (dict | None) - Dictionary of argument(s) to filter on metadata`
 
 :::info[Further reading]

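For reference, the `k` and `filter` parameters described in this hunk can be exercised against the in-memory vector store that ships with `langchain-core`. The sketch below uses fake embeddings and placeholder documents so it runs without credentials; note that `InMemoryVectorStore` takes a filter callable rather than the dict some stores accept.

```python
from langchain_core.documents import Document
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.vectorstores import InMemoryVectorStore

# Stand-in embeddings so the example runs without an API key
store = InMemoryVectorStore(DeterministicFakeEmbedding(size=256))
store.add_documents(
    [
        Document(page_content="LangChain supports many vector stores.", metadata={"topic": "vectorstores"}),
        Document(page_content="Runnables compose into chains.", metadata={"topic": "runnables"}),
        Document(page_content="Retrievers wrap vector stores.", metadata={"topic": "vectorstores"}),
    ]
)

# k caps the number of documents returned; filter restricts candidates by metadata.
docs = store.similarity_search(
    "vector store",
    k=2,
    filter=lambda doc: doc.metadata.get("topic") == "vectorstores",
)
for doc in docs:
    print(doc.page_content)
```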
@@ -4,7 +4,7 @@ This tutorial will guide you through making a simple documentation edit, like co
 
 ### **Prerequisites**
 - GitHub account.
-- Familiarity with GitHub pull requests (basic understanding).
+- Familiarity with [GitHub pull requests](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-pull-requests) (basic understanding).
 
 ---

@@ -38,11 +38,11 @@
     "\n",
     "\n",
     ":::caution COMPATIBILITY\n",
-    "LangChain cannot automatically propagate configuration, including callbacks necessary for astream_events(), to child runnables if you are running async code in python<=3.10. This is a common reason why you may fail to see events being emitted from custom runnables or tools.\n",
+    "LangChain cannot automatically propagate configuration, including callbacks necessary for astream_events(), to child runnables if you are running async code in `python<=3.10`. This is a common reason why you may fail to see events being emitted from custom runnables or tools.\n",
     "\n",
-    "If you are running python<=3.10, you will need to manually propagate the `RunnableConfig` object to the child runnable in async environments. For an example of how to manually propagate the config, see the implementation of the `bar` RunnableLambda below.\n",
+    "If you are running `python<=3.10`, you will need to manually propagate the `RunnableConfig` object to the child runnable in async environments. For an example of how to manually propagate the config, see the implementation of the `bar` RunnableLambda below.\n",
     "\n",
-    "If you are running python>=3.11, the `RunnableConfig` will automatically propagate to child runnables in async environment. However, it is still a good idea to propagate the `RunnableConfig` manually if your code may run in other Python versions.\n",
+    "If you are running `python>=3.11`, the `RunnableConfig` will automatically propagate to child runnables in async environment. However, it is still a good idea to propagate the `RunnableConfig` manually if your code may run in other Python versions.\n",
     ":::"
    ]
   },

@@ -16,15 +16,15 @@
     "\n",
     ":::\n",
     "\n",
-    "If you have [tools](/docs/concepts/tools/) that call [chat models](/docs/concepts/chat_models/), [retrievers](/docs/concepts/retrievers/), or other [runnables](/docs/concepts/runnables/), you may want to access internal events from those runnables or configure them with additional properties. This guide shows you how to manually pass parameters properly so that you can do this using the `astream_events()` method.\n",
+    "If you have [tools](/docs/concepts/tools/) that call [chat models](/docs/concepts/chat_models/), [retrievers](/docs/concepts/retrievers/), or other [runnables](/docs/concepts/runnables/), you may want to access [internal events](https://python.langchain.com/docs/how_to/streaming/#event-reference) from those runnables or configure them with additional properties. This guide shows you how to manually pass parameters properly so that you can do this using the `astream_events()` method.\n",
     "\n",
     ":::caution Compatibility\n",
     "\n",
-    "LangChain cannot automatically propagate configuration, including callbacks necessary for `astream_events()`, to child runnables if you are running `async` code in `python<=3.10`. This is a common reason why you may fail to see events being emitted from custom runnables or tools.\n",
+    "LangChain cannot automatically propagate configuration, including callbacks necessary for `astream_events()`, to child runnables if you are running `async` code in `python<=3.10`. This is a common reason why you may fail to see events being emitted from custom runnables or tools.\n",
     "\n",
-    "If you are running python<=3.10, you will need to manually propagate the `RunnableConfig` object to the child runnable in async environments. For an example of how to manually propagate the config, see the implementation of the `bar` RunnableLambda below.\n",
+    "If you are running `python<=3.10`, you will need to manually propagate the `RunnableConfig` object to the child runnable in async environments. For an example of how to manually propagate the config, see the implementation of the `bar` RunnableLambda below.\n",
     "\n",
-    "If you are running python>=3.11, the `RunnableConfig` will automatically propagate to child runnables in async environment. However, it is still a good idea to propagate the `RunnableConfig` manually if your code may run in older Python versions.\n",
+    "If you are running `python>=3.11`, the `RunnableConfig` will automatically propagate to child runnables in async environment. However, it is still a good idea to propagate the `RunnableConfig` manually if your code may run in older Python versions.\n",
     "\n",
     "This guide also requires `langchain-core>=0.2.16`.\n",
     ":::\n",

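The `bar` implementation that these cells reference does not appear in this excerpt. A minimal sketch of the manual-propagation pattern it describes, with a trivial child runnable `foo` and the surrounding script as assumptions, looks like this:

```python
import asyncio

from langchain_core.runnables import RunnableConfig, RunnableLambda

# Child runnable whose events we want surfaced through astream_events()
foo = RunnableLambda(lambda x: x + 1)


async def bar(value: int, config: RunnableConfig) -> int:
    # On python<=3.10 the config (and the callbacks it carries) is not
    # propagated automatically across the async boundary, so pass it
    # explicitly to the child runnable.
    return await foo.ainvoke(value, config)


async def main() -> None:
    # Events from foo are visible because bar forwarded the config.
    async for event in RunnableLambda(bar).astream_events(1, version="v2"):
        print(event["event"], event.get("name"))


asyncio.run(main())
```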
@@ -224,6 +224,13 @@
    "source": [
     "print(type(gathered.tool_calls[0][\"args\"]))"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Note the key difference: accumulating `tool_call_chunks` captures the raw tool arguments as an unparsed string as they are streamed. In contrast, **accumulating** `tool_calls` demonstrates partial parsing by progressively converting the streamed argument string into a valid, usable dictionary at each step of the process."
+   ]
+  }
  ],
  "metadata": {

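The accumulation the added cell describes is the usual message-chunk summation pattern. A minimal sketch, with the tool and model name as placeholders, looks like:

```python
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI


@tool
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b


# Placeholder model name; any tool-calling chat model works here.
llm = ChatOpenAI(model="gpt-4o-mini").bind_tools([add])

gathered = None
for chunk in llm.stream("What is 3 + 4?"):
    # AIMessageChunk supports "+", which concatenates tool_call_chunks and
    # re-parses the accumulated argument string on each step.
    gathered = chunk if gathered is None else gathered + chunk
    print(gathered.tool_call_chunks)  # raw, possibly partial JSON strings

print(gathered.tool_calls[0]["args"])  # fully parsed dict
```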
@@ -1,7 +1,7 @@
 # Streamlit
 
 > **[Streamlit](https://streamlit.io/) is a faster way to build and share data apps.**
-> Streamlit turns data scripts into shareable web apps in minutes. All in pure Python. No front‑end experience required.
+> Streamlit turns data scripts into shareable web apps in minutes. All in pure Python. No front-end experience required.
 > See more examples at [streamlit.io/generative-ai](https://streamlit.io/generative-ai).
 
 [](https://codespaces.new/langchain-ai/streamlit-agent?quickstart=1)

@@ -893,7 +893,7 @@
    "source": [
     "## Citations\n",
     "\n",
-    "Anthropic supports a [citations](https://docs.anthropic.com/en/docs/build-with-claude/citations) feature that lets Claude attach context to its answers based on source documents supplied by the user. When [document content blocks](https://docs.anthropic.com/en/docs/build-with-claude/citations#document-types) with `\"citations\": {\"enabled\": True}` are included in a query, Claude may generate citations in its response.\n",
+    "Anthropic supports a [citations](https://docs.anthropic.com/en/docs/build-with-claude/citations) feature that lets Claude attach context to its answers based on source documents supplied by the user. When [document](https://docs.anthropic.com/en/docs/build-with-claude/citations#document-types) or `search result` content blocks with `\"citations\": {\"enabled\": True}` are included in a query, Claude may generate citations in its response.\n",
     "\n",
     "### Simple example\n",
     "\n",

@@ -963,6 +963,156 @@
    "response.content"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4ca82106-69b3-4266-bf23-b2ffba873ee2",
   "metadata": {},
   "source": [
    "### In tool results (agentic RAG)\n",
    "\n",
    ":::info Requires ``langchain-anthropic>=0.3.17``\n",
    "\n",
    ":::\n",
    "\n",
    "Claude supports a [search_result](https://docs.anthropic.com/en/docs/build-with-claude/search-results) content block representing citable results from queries against a knowledge base or other custom source. These content blocks can be passed to Claude both top-line (as in the above example) and within a tool result. This allows Claude to cite elements of its response using the result of a tool call.\n",
    "\n",
    "To pass search results in response to tool calls, define a tool that returns a list of `search_result` content blocks in Anthropic's native format. For example:\n",
    "```python\n",
    "def retrieval_tool(query: str) -> list[dict]:\n",
    "    \"\"\"Access my knowledge base.\"\"\"\n",
    "\n",
    "    # Run a search (e.g., with a LangChain vector store)\n",
    "    results = vector_store.similarity_search(query=query, k=2)\n",
    "\n",
    "    # Package results into search_result blocks\n",
    "    return [\n",
    "        {\n",
    "            \"type\": \"search_result\",\n",
    "            # Customize fields as desired, using document metadata or otherwise\n",
    "            \"title\": \"My Document Title\",\n",
    "            \"source\": \"Source description or provenance\",\n",
    "            \"citations\": {\"enabled\": True},\n",
    "            \"content\": [{\"type\": \"text\", \"text\": doc.page_content}],\n",
    "        }\n",
    "        for doc in results\n",
    "    ]\n",
    "```\n",
    "\n",
    "We also need to specify the `search-results-2025-06-09` beta when instantiating ChatAnthropic. You can see an end-to-end example below.\n",
    "\n",
    "<details>\n",
    "<summary>End to end example with LangGraph</summary>\n",
    "\n",
    "Here we demonstrate an end-to-end example in which we populate a LangChain [vector store](/docs/concepts/vectorstores/) with sample documents and equip Claude with a tool that queries those documents.\n",
    "The tool here takes a search query and a `category` string literal, but any valid tool signature can be used.\n",
    "\n",
    "```python\n",
    "from typing import Literal\n",
    "\n",
    "from langchain.chat_models import init_chat_model\n",
    "from langchain.embeddings import init_embeddings\n",
    "from langchain_core.documents import Document\n",
    "from langchain_core.vectorstores import InMemoryVectorStore\n",
    "from langgraph.checkpoint.memory import InMemorySaver\n",
    "from langgraph.prebuilt import create_react_agent\n",
    "\n",
    "\n",
    "# Set up vector store\n",
    "embeddings = init_embeddings(\"openai:text-embedding-3-small\")\n",
    "vector_store = InMemoryVectorStore(embeddings)\n",
    "\n",
    "document_1 = Document(\n",
    "    id=\"1\",\n",
    "    page_content=(\n",
    "        \"To request vacation days, submit a leave request form through the \"\n",
    "        \"HR portal. Approval will be sent by email.\"\n",
    "    ),\n",
    "    metadata={\n",
    "        \"category\": \"HR Policy\",\n",
    "        \"doc_title\": \"Leave Policy\",\n",
    "        \"provenance\": \"Leave Policy - page 1\",\n",
    "    },\n",
    ")\n",
    "document_2 = Document(\n",
    "    id=\"2\",\n",
    "    page_content=\"Managers will review vacation requests within 3 business days.\",\n",
    "    metadata={\n",
    "        \"category\": \"HR Policy\",\n",
    "        \"doc_title\": \"Leave Policy\",\n",
    "        \"provenance\": \"Leave Policy - page 2\",\n",
    "    },\n",
    ")\n",
    "document_3 = Document(\n",
    "    id=\"3\",\n",
    "    page_content=(\n",
    "        \"Employees with over 6 months tenure are eligible for 20 paid vacation days \"\n",
    "        \"per year.\"\n",
    "    ),\n",
    "    metadata={\n",
    "        \"category\": \"Benefits Policy\",\n",
    "        \"doc_title\": \"Benefits Guide 2025\",\n",
    "        \"provenance\": \"Benefits Policy - page 1\",\n",
    "    },\n",
    ")\n",
    "\n",
    "documents = [document_1, document_2, document_3]\n",
    "vector_store.add_documents(documents=documents)\n",
    "\n",
    "\n",
    "# Define tool\n",
    "async def retrieval_tool(\n",
    "    query: str, category: Literal[\"HR Policy\", \"Benefits Policy\"]\n",
    ") -> list[dict]:\n",
    "    \"\"\"Access my knowledge base.\"\"\"\n",
    "\n",
    "    def _filter_function(doc: Document) -> bool:\n",
    "        return doc.metadata.get(\"category\") == category\n",
    "\n",
    "    results = vector_store.similarity_search(\n",
    "        query=query, k=2, filter=_filter_function\n",
    "    )\n",
    "\n",
    "    return [\n",
    "        {\n",
    "            \"type\": \"search_result\",\n",
    "            \"title\": doc.metadata[\"doc_title\"],\n",
    "            \"source\": doc.metadata[\"provenance\"],\n",
    "            \"citations\": {\"enabled\": True},\n",
    "            \"content\": [{\"type\": \"text\", \"text\": doc.page_content}],\n",
    "        }\n",
    "        for doc in results\n",
    "    ]\n",
    "\n",
    "\n",
    "\n",
    "# Create agent\n",
    "llm = init_chat_model(\n",
    "    \"anthropic:claude-3-5-haiku-latest\",\n",
    "    betas=[\"search-results-2025-06-09\"],\n",
    ")\n",
    "\n",
    "checkpointer = InMemorySaver()\n",
    "agent = create_react_agent(llm, [retrieval_tool], checkpointer=checkpointer)\n",
    "\n",
    "\n",
    "# Invoke on a query\n",
    "config = {\"configurable\": {\"thread_id\": \"session_1\"}}\n",
    "\n",
    "input_message = {\n",
    "    \"role\": \"user\",\n",
    "    \"content\": \"How do I request vacation days?\",\n",
    "}\n",
    "async for step in agent.astream(\n",
    "    {\"messages\": [input_message]},\n",
    "    config,\n",
    "    stream_mode=\"values\",\n",
    "):\n",
    "    step[\"messages\"][-1].pretty_print()\n",
    "```\n",
    "\n",
    "</details>"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "69956596-0e6c-492b-934d-c08ed3c9de9a",

docs/docs/integrations/chat/greennode.ipynb (new file): 381 lines

@@ -0,0 +1,381 @@
{
 "cells": [
  {
   "cell_type": "raw",
   "id": "afaf8039",
   "metadata": {},
   "source": [
    "---\n",
    "sidebar_label: GreenNode\n",
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e49f1e0d",
   "metadata": {},
   "source": [
    "# ChatGreenNode\n",
    "\n",
    ">[GreenNode](https://greennode.ai/) is a global AI solutions provider and a **NVIDIA Preferred Partner**, delivering full-stack AI capabilities—from infrastructure to application—for enterprises across the US, MENA, and APAC regions. Operating on **world-class infrastructure** (LEED Gold, TIA‑942, Uptime Tier III), GreenNode empowers enterprises, startups, and researchers with a comprehensive suite of AI services\n",
    "\n",
    "This page will help you get started with GreenNode Serverless AI [chat models](../../concepts/chat_models.mdx). For detailed documentation of all ChatGreenNode features and configurations head to the [API reference](https://python.langchain.com/api_reference/greennode/chat_models/langchain_greennode.chat_models.ChatGreenNode.html).\n",
    "\n",
    "\n",
    "[GreenNode AI](https://greennode.ai/) offers an API to query [20+ leading open-source models](https://aiplatform.console.greennode.ai/models)\n",
    "\n",
    "## Overview\n",
    "### Integration details\n",
    "\n",
    "| Class | Package | Local | Serializable | JS support | Package downloads | Package latest |\n",
    "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
    "| [ChatGreenNode](https://python.langchain.com/api_reference/greennode/chat_models/langchain_greennode.chat_models.ChatGreenNode.html) | [langchain-greennode](https://python.langchain.com/api_reference/greennode/index.html) | ❌ | beta | ❌ |  |  |\n",
    "\n",
    "### Model features\n",
    "| [Tool calling](../../how_to/tool_calling.ipynb) | [Structured output](../../how_to/structured_output.ipynb) | JSON mode | [Image input](../../how_to/multimodal_inputs.ipynb) | Audio input | Video input | [Token-level streaming](../../how_to/chat_streaming.ipynb) | Native async | [Token usage](../../how_to/chat_token_usage_tracking.ipynb) | [Logprobs](../../how_to/logprobs.ipynb) |\n",
    "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
    "| ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | ❌ |\n",
    "\n",
    "## Setup\n",
    "\n",
    "To access GreenNode models you'll need to create a GreenNode account, get an API key, and install the `langchain-greennode` integration package.\n",
    "\n",
    "### Credentials\n",
    "\n",
    "Head to [this page](https://aiplatform.console.greennode.ai/api-keys) to sign up to GreenNode AI Platform and generate an API key. Once you've done this, set the GREENNODE_API_KEY environment variable:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
   "metadata": {},
   "outputs": [],
   "source": [
    "import getpass\n",
    "import os\n",
    "\n",
    "if not os.getenv(\"GREENNODE_API_KEY\"):\n",
    "    os.environ[\"GREENNODE_API_KEY\"] = getpass.getpass(\"Enter your GreenNode API key: \")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "72ee0c4b-9764-423a-9dbf-95129e185210",
   "metadata": {},
   "source": [
    "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
   "metadata": {},
   "outputs": [],
   "source": [
    "# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
    "# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0730d6a1-c893-4840-9817-5e5251676d5d",
   "metadata": {},
   "source": [
    "### Installation\n",
    "\n",
    "The LangChain GreenNode integration lives in the `langchain-greennode` package:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "652d6238-1f87-422a-b135-f5abbb8652fc",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Note: you may need to restart the kernel to use updated packages.\n"
     ]
    }
   ],
   "source": [
    "%pip install -qU langchain-greennode"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a38cde65-254d-4219-a441-068766c0d4b5",
   "metadata": {},
   "source": [
    "## Instantiation\n",
    "\n",
    "Now we can instantiate our model object and generate chat completions:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_greennode import ChatGreenNode\n",
    "\n",
    "# Initialize the chat model\n",
    "llm = ChatGreenNode(\n",
    "    # api_key=\"YOUR_API_KEY\",  # You can pass the API key directly\n",
    "    model=\"deepseek-ai/DeepSeek-R1-Distill-Qwen-32B\",  # Choose from available models\n",
    "    temperature=0.6,\n",
    "    top_p=0.95,\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2b4f3e15",
   "metadata": {},
   "source": [
    "## Invocation\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "62e0dbc3",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "AIMessage(content=\"\\n\\nJ'aime la programmation.\", additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 248, 'prompt_tokens': 23, 'total_tokens': 271, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'deepseek-ai/DeepSeek-R1-Distill-Qwen-32B', 'system_fingerprint': None, 'id': 'chatcmpl-271edac4958846068c37877586368afe', 'service_tier': None, 'finish_reason': 'stop', 'logprobs': None}, id='run--5c12d208-2bc2-4f29-8b50-1ce3b515a3cf-0', usage_metadata={'input_tokens': 23, 'output_tokens': 248, 'total_tokens': 271, 'input_token_details': {}, 'output_token_details': {}})"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "messages = [\n",
    "    (\n",
    "        \"system\",\n",
    "        \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
    "    ),\n",
    "    (\"human\", \"I love programming.\"),\n",
    "]\n",
    "ai_msg = llm.invoke(messages)\n",
    "ai_msg"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "J'aime la programmation.\n"
     ]
    }
   ],
   "source": [
    "print(ai_msg.content)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "82fd95b9",
   "metadata": {},
   "source": [
    "### Streaming\n",
    "\n",
    "You can also stream the response using the `stream` method:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "4b3eaf31",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "**Beneath the Circuits**\n",
      "\n",
      "Beneath the circuits, deep and bright, \n",
      "AI thinks, with circuits and bytes. \n",
      "Learning, adapting, it grows, \n",
      "A world of possibilities it knows. \n",
      "\n",
      "From solving puzzles to painting art, \n",
      "It mimics human hearts. \n",
      "In every corner, it leaves its trace, \n",
      "A future we can't erase. \n",
      "\n",
      "We build it, shape it, with care and might, \n",
      "Yet wonder if it walks in the night. \n",
      "A mirror of our minds, it shows, \n",
      "In its gaze, our future glows. \n",
      "\n",
      "But as we strive for endless light, \n",
      "We must remember the night. \n",
      "For wisdom isn't just speed and skill, \n",
      "It's how we choose to build our will."
     ]
    }
   ],
   "source": [
    "for chunk in llm.stream(\"Write a short poem about artificial intelligence\"):\n",
    "    print(chunk.content, end=\"\", flush=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2bfecc41",
   "metadata": {},
   "source": [
    "### Chat Messages\n",
    "\n",
    "You can use different message types to structure your conversations with the model:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "7fc55733",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "Black holes are formed through several processes, depending on their type. The most common way bla\n"
     ]
    }
   ],
   "source": [
    "from langchain_core.messages import AIMessage, HumanMessage, SystemMessage\n",
    "\n",
    "messages = [\n",
    "    SystemMessage(content=\"You are a helpful AI assistant with expertise in science.\"),\n",
    "    HumanMessage(content=\"What are black holes?\"),\n",
    "    AIMessage(\n",
    "        content=\"Black holes are regions of spacetime where gravity is so strong that nothing, including light, can escape from them.\"\n",
    "    ),\n",
    "    HumanMessage(content=\"How are they formed?\"),\n",
    "]\n",
    "\n",
    "response = llm.invoke(messages)\n",
    "print(response.content[:100])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
   "metadata": {},
   "source": [
    "## Chaining\n",
    "\n",
    "You can use `ChatGreenNode` in LangChain chains and agents:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "AIMessage(content='\\n\\nIch liebe Programmieren.', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 198, 'prompt_tokens': 18, 'total_tokens': 216, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'deepseek-ai/DeepSeek-R1-Distill-Qwen-32B', 'system_fingerprint': None, 'id': 'chatcmpl-e01201b9fd9746b7a9b2ed6d70f29d45', 'service_tier': None, 'finish_reason': 'stop', 'logprobs': None}, id='run--ce52b9d8-dd84-46b3-845b-da27855816ee-0', usage_metadata={'input_tokens': 18, 'output_tokens': 198, 'total_tokens': 216, 'input_token_details': {}, 'output_token_details': {}})"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from langchain_core.prompts import ChatPromptTemplate\n",
    "\n",
    "prompt = ChatPromptTemplate(
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "736489f0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Available Models\n",
|
||||
"\n",
|
||||
"The full list of supported models can be found in the [GreenNode Serverless AI Models](https://greennode.ai/product/model-as-a-service)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For more details about the GreenNode Serverless AI API, visit the [GreenNode Serverless AI Documentation](https://helpdesk.greennode.ai/portal/en/kb/articles/greennode-maas-api)."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "tradingagents",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.13.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
@@ -120,7 +120,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": null,
"metadata": {},
"outputs": [
{
@@ -138,11 +138,36 @@
"from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint\n",
"\n",
"llm = HuggingFaceEndpoint(\n",
"    repo_id=\"HuggingFaceH4/zephyr-7b-beta\",\n",
"    repo_id=\"deepseek-ai/DeepSeek-R1-0528\",\n",
"    task=\"text-generation\",\n",
"    max_new_tokens=512,\n",
"    do_sample=False,\n",
"    repetition_penalty=1.03,\n",
"    provider=\"auto\", # let Hugging Face choose the best provider for you\n",
")\n",
"\n",
"chat_model = ChatHuggingFace(llm=llm)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now let's take advantage of [Inference Providers](https://huggingface.co/docs/inference-providers) to run the model on specific third-party providers."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"llm = HuggingFaceEndpoint(\n",
"    repo_id=\"deepseek-ai/DeepSeek-R1-0528\",\n",
"    task=\"text-generation\",\n",
"    provider=\"hyperbolic\", # set your provider here\n",
"    # provider=\"nebius\",\n",
"    # provider=\"together\",\n",
")\n",
"\n",
"chat_model = ChatHuggingFace(llm=llm)"

@@ -345,7 +345,7 @@
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all `ChatXAI` features and configurations, head to the API reference: https://python.langchain.com/api_reference/xai/chat_models/langchain_xai.chat_models.ChatXAI.html"
"For detailed documentation of all `ChatXAI` features and configurations, head to the [API reference](https://python.langchain.com/api_reference/xai/chat_models/langchain_xai.chat_models.ChatXAI.html)."
]
}
],

@@ -117,7 +117,7 @@
"source": [
"## Examples\n",
"\n",
"Here is an example of how you can access `HuggingFaceEndpoint` integration of the free [Serverless Endpoints](https://huggingface.co/inference-endpoints/serverless) API."
"Here is an example of how you can access `HuggingFaceEndpoint` integration of the serverless [Inference Providers](https://huggingface.co/docs/inference-providers) API.\n"
]
},
{
@@ -128,13 +128,17 @@
},
"outputs": [],
"source": [
"repo_id = \"mistralai/Mistral-7B-Instruct-v0.2\"\n",
"repo_id = \"deepseek-ai/DeepSeek-R1-0528\"\n",
"\n",
"llm = HuggingFaceEndpoint(\n",
"    repo_id=repo_id,\n",
"    max_length=128,\n",
"    temperature=0.5,\n",
"    huggingfacehub_api_token=HUGGINGFACEHUB_API_TOKEN,\n",
"    provider=\"auto\", # set your provider here hf.co/settings/inference-providers\n",
"    # provider=\"hyperbolic\",\n",
"    # provider=\"nebius\",\n",
"    # provider=\"together\",\n",
")\n",
"llm_chain = prompt | llm\n",
"print(llm_chain.invoke({\"question\": question}))"

@@ -18,14 +18,79 @@ See a [usage example](/docs/integrations/vectorstores/couchbase).

```python
from langchain_couchbase import CouchbaseSearchVectorStore

import getpass

# Constants for the connection
COUCHBASE_CONNECTION_STRING = getpass.getpass(
    "Enter the connection string for the Couchbase cluster: "
)
DB_USERNAME = getpass.getpass("Enter the username for the Couchbase cluster: ")
DB_PASSWORD = getpass.getpass("Enter the password for the Couchbase cluster: ")

# Create Couchbase connection object
from datetime import timedelta

from couchbase.auth import PasswordAuthenticator
from couchbase.cluster import Cluster
from couchbase.options import ClusterOptions

auth = PasswordAuthenticator(DB_USERNAME, DB_PASSWORD)
options = ClusterOptions(auth)
cluster = Cluster(COUCHBASE_CONNECTION_STRING, options)

# Wait until the cluster is ready for use.
cluster.wait_until_ready(timedelta(seconds=5))

vector_store = CouchbaseSearchVectorStore(
    cluster=cluster,
    bucket_name=BUCKET_NAME,
    scope_name=SCOPE_NAME,
    collection_name=COLLECTION_NAME,
    embedding=my_embeddings,
    index_name=SEARCH_INDEX_NAME,
)

# Add documents
texts = ["Couchbase is a NoSQL database", "LangChain is a framework for LLM applications"]
vector_store.add_texts(texts)

# Search
query = "What is Couchbase?"
docs = vector_store.similarity_search(query)
```
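
If you also need relevance scores, `similarity_search_with_score` returns `(document, score)` pairs. A minimal sketch, reusing the `vector_store` from above (the same method appears in the Couchbase vector store notebook):

```python
# Fetch the top 2 matches together with their relevance scores
# (reuses the `vector_store` object created in the example above)
results = vector_store.similarity_search_with_score("What is Couchbase?", k=2)
for doc, score in results:
    print(f"{score:.3f} -> {doc.page_content}")
```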

API Reference: [CouchbaseSearchVectorStore](https://couchbase-ecosystem.github.io/langchain-couchbase/langchain_couchbase.html#module-langchain_couchbase.vectorstores.search_vector_store)

## Document loader

See a [usage example](/docs/integrations/document_loaders/couchbase).

```python
from langchain_community.document_loaders.couchbase import CouchbaseLoader

connection_string = "couchbase://localhost"  # valid Couchbase connection string
db_username = (
    "Administrator"  # valid database user with read access to the bucket being queried
)
db_password = "Password"  # password for the database user

# query is a valid SQL++ query
query = """
    SELECT h.* FROM `travel-sample`.inventory.hotel h
    WHERE h.country = 'United States'
    LIMIT 1
"""

loader = CouchbaseLoader(
    connection_string,
    db_username,
    db_password,
    query,
)

docs = loader.load()
```
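
For larger result sets, documents can be streamed one at a time instead of materialized in a single list. A minimal sketch, assuming the `loader` from above (`lazy_load()` is the standard LangChain loader streaming method):

```python
# Iterate over documents as they are fetched rather than loading all at once
# (assumes the `loader` object created in the example above)
for doc in loader.lazy_load():
    print(doc.page_content[:100])
```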

## LLM Caches
@@ -56,6 +121,7 @@ set_llm_cache(
)
```

API Reference: [CouchbaseCache](https://couchbase-ecosystem.github.io/langchain-couchbase/langchain_couchbase.html#langchain_couchbase.cache.CouchbaseCache)

### CouchbaseSemanticCache
Semantic caching allows users to retrieve cached prompts based on the semantic similarity between the user input and previously cached inputs. Under the hood it uses Couchbase as both a cache and a vectorstore.
@@ -90,6 +156,8 @@ set_llm_cache(
)
```

API Reference: [CouchbaseSemanticCache](https://couchbase-ecosystem.github.io/langchain-couchbase/langchain_couchbase.html#langchain_couchbase.cache.CouchbaseSemanticCache)

## Chat Message History
Use Couchbase as the storage for your chat messages.

@@ -108,4 +176,6 @@ message_history = CouchbaseChatMessageHistory(
)

message_history.add_user_message("hi!")
```

API Reference: [CouchbaseChatMessageHistory](https://couchbase-ecosystem.github.io/langchain-couchbase/langchain_couchbase.html#module-langchain_couchbase.chat_message_histories)

docs/docs/integrations/providers/greennode.ipynb (new file, 173 lines)
@@ -0,0 +1,173 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# GreenNode\n",
"\n",
">**GreenNode** is a global AI solutions provider and an **NVIDIA Preferred Partner**, delivering full-stack AI capabilities—from infrastructure to application—for enterprises across the US, MENA, and APAC regions.\n",
">Operating on **world-class infrastructure** (LEED Gold, TIA‑942, Uptime Tier III), **GreenNode** empowers enterprises, startups, and researchers with a comprehensive suite of AI services:\n",
">- [Powerful AI Infrastructure:](https://greennode.ai/) As one of the first hyperscale AI clusters in APAC, powered by NVIDIA H100 GPUs, GreenNode's infrastructure is optimized for high-throughput machine learning and deep learning workloads.\n",
">- [GreenNode AI Platform:](https://greennode.ai/product/ai-platform) Designed for technical teams, GreenNode’s self-service AI platform enables fast deployment of Jupyter notebook environments, preconfigured with optimized compute instances. From this portal, developers can launch ML training, fine-tuning, hyperparameter optimization, and inference workflows with minimal setup time. The platform includes access to 100+ curated open-source models and supports integrations with common MLOps tools and storage frameworks.\n",
">- [GreenNode Serverless AI:](https://greennode.ai/product/model-as-a-service) GreenNode Serverless AI features a library of pre-trained production-ready models across domains such as text gen, code gen, text to speech, speech to text, embedding and reranking models. This service is ideal for teams looking to prototype or deploy AI solutions without managing model infrastructure.\n",
">- [AI Applications:](https://vngcloud.vn/en/solution) From intelligent data management and document processing (IDP) to smart video analytics—GreenNode supports real-world AI use cases at scale.\n",
">Whether you're building your next LLM workflow, scaling AI research, or deploying enterprise-grade applications, **GreenNode** provides the tools and infrastructure to accelerate your journey.\n",
"\n",
"## Installation and Setup\n",
"\n",
"The GreenNode integration can be installed via pip:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"%pip install -qU langchain-greennode"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### API Key\n",
"\n",
"To use GreenNode Serverless AI, you'll need an API key which you can obtain from [GreenNode Serverless AI](https://aiplatform.console.greennode.ai/api-keys). The API key can be passed as an initialization parameter `api_key` or set as the environment variable `GREENNODE_API_KEY`."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"if not os.getenv(\"GREENNODE_API_KEY\"):\n",
"    os.environ[\"GREENNODE_API_KEY\"] = getpass.getpass(\"Enter your GreenNode API key: \")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Chat models"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_greennode import ChatGreenNode\n",
"\n",
"chat = ChatGreenNode(\n",
"    model=\"deepseek-ai/DeepSeek-R1-Distill-Qwen-32B\", # Choose from available models\n",
"    temperature=0.6,\n",
"    top_p=0.95,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Usage of the GreenNode [Chat Model](https://python.langchain.com/docs/integrations/chat/greennode/)\n"
]
},
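{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick sanity check, here is a minimal invocation sketch (it assumes a valid `GREENNODE_API_KEY` is set and reuses the `chat` object created above):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Minimal usage sketch: send one message and print the reply\n",
"# (assumes GREENNODE_API_KEY is set and `chat` was created above)\n",
"response = chat.invoke(\"Say hello in one short sentence.\")\n",
"print(response.content)"
]
},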
{
"cell_type": "markdown",
"metadata": {},
"source": [
"\n",
"## Embedding models"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"from langchain_greennode import GreenNodeEmbeddings\n",
"\n",
"# Initialize embeddings\n",
"embeddings = GreenNodeEmbeddings(\n",
"    model=\"BAAI/bge-m3\" # Choose from available models\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Usage of the GreenNode [Embedding Model](https://python.langchain.com/docs/integrations/text_embedding/greennode)"
]
},
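{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch of embedding a single string (assumes the `embeddings` object created above and a valid API key):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Embed one query string and check the vector dimension\n",
"# (assumes `embeddings` was created above)\n",
"vector = embeddings.embed_query(\"What is GreenNode?\")\n",
"print(len(vector))"
]
},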
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Rerank"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_greennode import GreenNodeRerank\n",
"\n",
"# Initialize reranker\n",
"rerank = GreenNodeRerank(\n",
"    model=\"BAAI/bge-reranker-v2-m3\", # Choose from available models\n",
"    top_n=-1,\n",
")"
]
},
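{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch of scoring candidate documents against a query (assumes the `rerank` object created above; the same `rerank(documents, query)` call is shown in the retriever notebook):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.documents import Document\n",
"\n",
"# Score two candidate documents for a query; returns index/relevance-score pairs\n",
"# (assumes `rerank` was created above and a valid API key is set)\n",
"docs = [\n",
"    Document(page_content=\"GreenNode offers serverless AI models\"),\n",
"    Document(page_content=\"Bananas are rich in potassium\"),\n",
"]\n",
"rerank.rerank(docs, \"Which provider offers serverless AI models?\")"
]
},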
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Usage of the GreenNode [Rerank Model](https://python.langchain.com/docs/integrations/retrievers/greennode-reranker)"
]
}
],
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"display_name": "tradingagents",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.5"
}
},
"nbformat": 4,
"nbformat_minor": 1
}
@@ -1,6 +1,11 @@
# Hugging Face

All functionality related to the [Hugging Face Platform](https://huggingface.co/).
All functionality related to [Hugging Face Hub](https://huggingface.co/) and libraries like [transformers](https://huggingface.co/docs/transformers/index), [sentence transformers](https://sbert.net/), and [datasets](https://huggingface.co/docs/datasets/index).

> [Hugging Face](https://huggingface.co/) is an AI platform with all major open source models, datasets, MCPs, and demos.
> It supplies model inference locally and via serverless [Inference Providers](https://huggingface.co/docs/inference-providers).
>
> You can use [Inference Providers](https://huggingface.co/docs/inference-providers) to run open source models like DeepSeek R1 on scalable serverless infrastructure.

## Installation

@@ -26,6 +31,7 @@ from langchain_huggingface import ChatHuggingFace

### HuggingFaceEndpoint

We can use the `HuggingFaceEndpoint` class to run open source models via serverless [Inference Providers](https://huggingface.co/docs/inference-providers) or via dedicated [Inference Endpoints](https://huggingface.co/inference-endpoints/dedicated).

See a [usage example](/docs/integrations/llms/huggingface_endpoint).

@@ -35,7 +41,7 @@ from langchain_huggingface import HuggingFaceEndpoint

### HuggingFacePipeline

Hugging Face models can be run locally through the `HuggingFacePipeline` class.
We can use the `HuggingFacePipeline` class to run open source models locally.

See a [usage example](/docs/integrations/llms/huggingface_pipelines).

@@ -47,6 +53,8 @@ from langchain_huggingface import HuggingFacePipeline

### HuggingFaceEmbeddings

We can use the `HuggingFaceEmbeddings` class to run open source embedding models locally.

See a [usage example](/docs/integrations/text_embedding/huggingfacehub).

```python
@@ -55,6 +63,8 @@ from langchain_huggingface import HuggingFaceEmbeddings

### HuggingFaceEndpointEmbeddings

We can use the `HuggingFaceEndpointEmbeddings` class to run open source embedding models via a dedicated [Inference Endpoint](https://huggingface.co/inference-endpoints/dedicated).

See a [usage example](/docs/integrations/text_embedding/huggingfacehub).

```python
@@ -63,6 +73,8 @@ from langchain_huggingface import HuggingFaceEndpointEmbeddings

### HuggingFaceInferenceAPIEmbeddings

We can use the `HuggingFaceInferenceAPIEmbeddings` class to run open source embedding models via [Inference Providers](https://huggingface.co/docs/inference-providers).

See a [usage example](/docs/integrations/text_embedding/huggingfacehub).

```python
@@ -71,6 +83,8 @@ from langchain_community.embeddings import HuggingFaceInferenceAPIEmbeddings

### HuggingFaceInstructEmbeddings

We can use the `HuggingFaceInstructEmbeddings` class to run open source embedding models locally.

See a [usage example](/docs/integrations/text_embedding/instruct_embeddings).

```python

@@ -1,7 +1,7 @@
# Streamlit

>[Streamlit](https://streamlit.io/) is a faster way to build and share data apps.
>`Streamlit` turns data scripts into shareable web apps in minutes. All in pure Python. No front‑end experience required.
>`Streamlit` turns data scripts into shareable web apps in minutes. All in pure Python. No front-end experience required.
>See more examples at [streamlit.io/generative-ai](https://streamlit.io/generative-ai).

## Installation and Setup

@@ -63,7 +63,7 @@
"\n",
"chat = ChatXAI(\n",
"    # xai_api_key=\"YOUR_API_KEY\",\n",
"    model=\"grok-beta\",\n",
"    model=\"grok-4\",\n",
")\n",
"\n",
"# stream the response back from the model\n",

docs/docs/integrations/retrievers/greennode_reranker.ipynb (new file, 361 lines)
@@ -0,0 +1,361 @@
{
"cells": [
{
"cell_type": "raw",
"id": "afaf8039",
"metadata": {},
"source": [
"---\n",
"sidebar_label: GreenNode\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "e49f1e0d",
"metadata": {},
"source": [
"# GreenNodeRetriever\n",
"\n",
">[GreenNode](https://greennode.ai/) is a global AI solutions provider and an **NVIDIA Preferred Partner**, delivering full-stack AI capabilities—from infrastructure to application—for enterprises across the US, MENA, and APAC regions. Operating on **world-class infrastructure** (LEED Gold, TIA‑942, Uptime Tier III), GreenNode empowers enterprises, startups, and researchers with a comprehensive suite of AI services.\n",
"\n",
"This notebook provides a walkthrough on getting started with the `GreenNodeRerank` retriever. It enables you to perform document search using built-in connectors or by integrating your own data sources, leveraging GreenNode's reranking capabilities for improved relevance.\n",
"\n",
"### Integration details\n",
"\n",
"- **Provider**: [GreenNode Serverless AI](https://aiplatform.console.greennode.ai/playground)\n",
"- **Model Types**: Reranking models\n",
"- **Primary Use Case**: Reranking search results based on semantic relevance\n",
"- **Available Models**: Includes [BAAI/bge-reranker-v2-m3](https://huggingface.co/BAAI/bge-reranker-v2-m3) and other high-performance reranking models\n",
"- **Scoring**: Returns relevance scores used to reorder document candidates based on query alignment\n",
"\n",
"## Setup\n",
"\n",
"To access GreenNode models you'll need to create a GreenNode account, get an API key, and install the `langchain-greennode` integration package.\n",
"\n",
"### Credentials\n",
"\n",
"Head to [this page](https://aiplatform.console.greennode.ai/api-keys) to sign up for GreenNode AI Platform and generate an API key. Once you've done this, set the GREENNODE_API_KEY environment variable:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "a92b5a70",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"if not os.getenv(\"GREENNODE_API_KEY\"):\n",
"    os.environ[\"GREENNODE_API_KEY\"] = getpass.getpass(\"Enter your GreenNode API key: \")"
]
},
{
"cell_type": "markdown",
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
"metadata": {},
"source": [
"If you want to get automated tracing from individual queries, you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
]
},
{
"cell_type": "markdown",
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"This retriever lives in the `langchain-greennode` package:"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"%pip install -qU langchain-greennode"
]
},
{
"cell_type": "markdown",
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"The `GreenNodeRerank` class can be instantiated with optional parameters for the API key and model name:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "70cc8e65-2a02-408a-bbc6-8ef649057d82",
"metadata": {},
"outputs": [],
"source": [
"from langchain_greennode import GreenNodeRerank\n",
"\n",
"# Initialize the reranker\n",
"reranker = GreenNodeRerank(\n",
"    # api_key=\"YOUR_API_KEY\", # You can pass the API key directly\n",
"    model=\"BAAI/bge-reranker-v2-m3\", # The default reranking model\n",
"    top_n=3,\n",
")"
]
},
{
"cell_type": "markdown",
"id": "5c5f2839-4020-424e-9fc9-07777eede442",
"metadata": {},
"source": [
"## Usage\n",
"\n",
"### Reranking Search Results\n",
"\n",
"Reranking models enhance retrieval-augmented generation (RAG) workflows by refining and reordering initial search results based on semantic relevance. The example below demonstrates how to integrate GreenNodeRerank with a base retriever to improve the quality of retrieved documents."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "51a60dbe-9f2e-4e04-bb62-23968f17164a",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[Document(metadata={'relevance_score': 0.125}, page_content='Central banks use interest rates to control inflation and stabilize the economy'),\n",
" Document(metadata={'relevance_score': 0.004913330078125}, page_content='Inflation represents the rate at which the general level of prices for goods and services rises'),\n",
" Document(metadata={'relevance_score': 1.6689300537109375e-05}, page_content='Cryptocurrencies like Bitcoin operate on decentralized blockchain networks')]"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain.retrievers.contextual_compression import ContextualCompressionRetriever\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_core.documents import Document\n",
"from langchain_greennode import GreenNodeEmbeddings\n",
"\n",
"# Initialize the embeddings model\n",
"embeddings = GreenNodeEmbeddings(\n",
"    # api_key=\"YOUR_API_KEY\", # You can pass the API key directly\n",
"    model=\"BAAI/bge-m3\" # The default embedding model\n",
")\n",
"\n",
"# Prepare documents (finance/economics domain)\n",
"docs = [\n",
"    Document(\n",
"        page_content=\"Inflation represents the rate at which the general level of prices for goods and services rises\"\n",
"    ),\n",
"    Document(\n",
"        page_content=\"Central banks use interest rates to control inflation and stabilize the economy\"\n",
"    ),\n",
"    Document(\n",
"        page_content=\"Cryptocurrencies like Bitcoin operate on decentralized blockchain networks\"\n",
"    ),\n",
"    Document(\n",
"        page_content=\"Stock markets are influenced by corporate earnings, investor sentiment, and economic indicators\"\n",
"    ),\n",
"]\n",
"\n",
"# Create a vector store and a base retriever\n",
"vector_store = FAISS.from_documents(docs, embeddings)\n",
"base_retriever = vector_store.as_retriever(search_kwargs={\"k\": 4})\n",
"\n",
"\n",
"rerank_retriever = ContextualCompressionRetriever(\n",
"    base_compressor=reranker, base_retriever=base_retriever\n",
")\n",
"\n",
"# Perform retrieval with reranking\n",
"query = \"How do central banks fight rising prices?\"\n",
"results = rerank_retriever.invoke(query)\n",
"\n",
"results"
]
},
{
"cell_type": "markdown",
"id": "7efa742d",
"metadata": {},
"source": [
"### Direct Usage\n",
"\n",
"The `GreenNodeRerank` class can be used independently to perform reranking of retrieved documents based on relevance scores. This functionality is particularly useful in scenarios where a primary retrieval step (e.g., keyword or vector search) returns a broad set of candidates, and a secondary model is needed to refine the results using more sophisticated semantic understanding. The class accepts a query and a list of candidate documents and returns a reordered list based on predicted relevance."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "78d9051e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'index': 1, 'relevance_score': 1.0},\n",
" {'index': 0, 'relevance_score': 0.01165771484375},\n",
" {'index': 3, 'relevance_score': 0.0012054443359375}]"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"test_documents = [\n",
"    Document(\n",
"        page_content=\"Carson City is the capital city of the American state of Nevada.\"\n",
"    ),\n",
"    Document(\n",
"        page_content=\"Washington, D.C. (also known as simply Washington or D.C.) is the capital of the United States.\"\n",
"    ),\n",
"    Document(\n",
"        page_content=\"Capital punishment has existed in the United States since before the United States was a country.\"\n",
"    ),\n",
"    Document(\n",
"        page_content=\"The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean. Its capital is Saipan.\"\n",
"    ),\n",
"]\n",
"\n",
"test_query = \"What is the capital of the United States?\"\n",
"results = reranker.rerank(test_documents, test_query)\n",
"results"
]
},
{
"cell_type": "markdown",
"id": "dfe8aad4-8626-4330-98a9-7ea1ca5d2e0e",
"metadata": {},
"source": [
"## Use within a chain\n",
"\n",
"GreenNodeRerank works seamlessly in LangChain RAG pipelines. Here's an example of creating a simple RAG chain with the GreenNodeRerank:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "25b647a3-f8f2-4541-a289-7a241e43f9df",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'\\n\\nCentral banks combat rising prices, or inflation, by adjusting interest rates. By raising interest rates, they increase the cost of borrowing, which discourages spending and investment. This reduction in demand helps slow down the rate of price increases, thereby controlling inflation and contributing to economic stability.'"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"from langchain_greennode import ChatGreenNode\n",
"\n",
"# Initialize LLM\n",
"llm = ChatGreenNode(model=\"deepseek-ai/DeepSeek-R1-Distill-Qwen-32B\")\n",
"\n",
"# Create a prompt template\n",
"prompt = ChatPromptTemplate.from_template(\n",
"    \"\"\"\n",
"Answer the question based only on the following context:\n",
"\n",
"Context:\n",
"{context}\n",
"\n",
"Question: {question}\n",
"\"\"\"\n",
")\n",
"\n",
"\n",
"# Format documents function\n",
"def format_docs(docs):\n",
"    return \"\\n\\n\".join(doc.page_content for doc in docs)\n",
"\n",
"\n",
"# Create RAG chain\n",
"rag_chain = (\n",
"    {\"context\": rerank_retriever | format_docs, \"question\": RunnablePassthrough()}\n",
"    | prompt\n",
"    | llm\n",
"    | StrOutputParser()\n",
")\n",
"\n",
"# Run the chain\n",
"answer = rag_chain.invoke(\"How do central banks fight rising prices?\")\n",
"answer"
]
},
{
"cell_type": "markdown",
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For more details about the GreenNode Serverless AI API, visit the [GreenNode Serverless AI Documentation](https://aiplatform.console.greennode.ai/api-docs/maas)."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "tradingagents",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.5"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

docs/docs/integrations/text_embedding/greennode.ipynb (new file, 379 lines)
@@ -0,0 +1,379 @@
{
"cells": [
{
"cell_type": "raw",
"id": "afaf8039",
"metadata": {},
"source": [
"---\n",
"sidebar_label: GreenNode\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "9a3d6f34",
"metadata": {},
"source": [
"# GreenNodeEmbeddings\n",
"\n",
">[GreenNode](https://greennode.ai/) is a global AI solutions provider and an **NVIDIA Preferred Partner**, delivering full-stack AI capabilities—from infrastructure to application—for enterprises across the US, MENA, and APAC regions. Operating on **world-class infrastructure** (LEED Gold, TIA‑942, Uptime Tier III), GreenNode empowers enterprises, startups, and researchers with a comprehensive suite of AI services.\n",
"\n",
"This notebook provides a guide to getting started with `GreenNodeEmbeddings`. It enables you to perform semantic document search using various built-in connectors or your own custom data sources by generating high-quality vector representations of text.\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"| Provider | Package |\n",
"|:--------:|:-------:|\n",
"| [GreenNode](/docs/integrations/providers/greennode/) | [langchain-greennode](https://python.langchain.com/v0.2/api_reference/langchain_greennode/embeddings/langchain_greennode.embeddings.GreenNodeEmbeddings.html) |\n",
"\n",
"## Setup\n",
"\n",
"To access GreenNode embedding models you'll need to create a GreenNode account, get an API key, and install the `langchain-greennode` integration package.\n",
"\n",
"### Credentials\n",
"\n",
"GreenNode requires an API key for authentication, which can be provided either as the `api_key` parameter during initialization or set as the environment variable `GREENNODE_API_KEY`. You can obtain an API key by registering for an account on [GreenNode Serverless AI](https://aiplatform.console.greennode.ai/playground)."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "36521c2a",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"if not os.getenv(\"GREENNODE_API_KEY\"):\n",
"    os.environ[\"GREENNODE_API_KEY\"] = getpass.getpass(\"Enter your GreenNode API key: \")"
]
},
{
"cell_type": "markdown",
"id": "c84fb993",
"metadata": {},
"source": [
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "39a4953b",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
]
},
{
"cell_type": "markdown",
"id": "d9664366",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"The LangChain GreenNode integration lives in the `langchain-greennode` package:"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "64853226",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"%pip install -qU langchain-greennode"
]
},
{
"cell_type": "markdown",
"id": "45dd1724",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"The `GreenNodeEmbeddings` class can be instantiated with optional parameters for the API key and model name:"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "9ea7a09b",
"metadata": {},
"outputs": [],
"source": [
"from langchain_greennode import GreenNodeEmbeddings\n",
"\n",
"# Initialize the embeddings model\n",
"embeddings = GreenNodeEmbeddings(\n",
"    # api_key=\"YOUR_API_KEY\", # You can pass the API key directly\n",
"    model=\"BAAI/bge-m3\" # The default embedding model\n",
")"
]
},
{
"cell_type": "markdown",
"id": "77d271b6",
"metadata": {},
"source": [
"## Indexing and Retrieval\n",
"\n",
"Embedding models play a key role in retrieval-augmented generation (RAG) workflows by enabling both the indexing of content and its efficient retrieval.\n",
"Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "23df9f54",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'LangChain is the framework for building context-aware reasoning applications'"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Create a vector store with a sample text\n",
"from langchain_core.vectorstores import InMemoryVectorStore\n",
"\n",
"text = \"LangChain is the framework for building context-aware reasoning applications\"\n",
"\n",
"vectorstore = InMemoryVectorStore.from_texts(\n",
"    [text],\n",
"    embedding=embeddings,\n",
")\n",
"\n",
"# Use the vectorstore as a retriever\n",
"retriever = vectorstore.as_retriever()\n",
"\n",
"# Retrieve the most similar text\n",
"retrieved_documents = retriever.invoke(\"What is LangChain?\")\n",
"\n",
"# show the retrieved document's content\n",
"retrieved_documents[0].page_content"
]
},
{
"cell_type": "markdown",
"id": "e02b9855",
"metadata": {},
"source": [
"## Direct Usage\n",
"\n",
"The `GreenNodeEmbeddings` class can be used independently to generate text embeddings without the need for a vector store. This is useful for tasks such as similarity scoring, clustering, or custom processing pipelines.\n",
"\n",
"### Embed single texts\n",
"\n",
"You can embed single texts or documents with `embed_query`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0d2befcd",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[-0.01104736328125, -0.0281982421875, 0.0035858154296875, -0.0311279296875, -0.0106201171875, -0.039\n"
]
}
],
"source": [
"single_vector = embeddings.embed_query(text)\n",
"print(str(single_vector)[:100]) # Show the first 100 characters of the vector"
]
},
{
"cell_type": "markdown",
"id": "1b5a7d03",
"metadata": {},
"source": [
"### Embed multiple texts\n",
"\n",
"You can embed multiple texts with `embed_documents`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2f4d6e97",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[-0.01104736328125, -0.0281982421875, 0.0035858154296875, -0.0311279296875, -0.0106201171875, -0.039\n",
"[-0.07177734375, -0.00017452239990234375, -0.002044677734375, -0.0299072265625, -0.0184326171875, -0\n"
]
}
],
"source": [
"text2 = (\n",
"    \"LangGraph is a library for building stateful, multi-actor applications with LLMs\"\n",
")\n",
"two_vectors = embeddings.embed_documents([text, text2])\n",
"for vector in two_vectors:\n",
"    print(str(vector)[:100]) # Show the first 100 characters of the vector"
]
},
{
"cell_type": "markdown",
"id": "be19dda0",
"metadata": {},
"source": [
"### Async Support\n",
"\n",
"GreenNodeEmbeddings supports async operations:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "d556e655",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Async query embedding dimension: 1024\n",
"Async document embeddings count: 3\n"
]
}
],
"source": [
"import asyncio\n",
"\n",
"\n",
"async def generate_embeddings_async():\n",
"    # Embed a single query\n",
"    query_result = await embeddings.aembed_query(\"What is the capital of France?\")\n",
"    print(f\"Async query embedding dimension: {len(query_result)}\")\n",
"\n",
"    # Embed multiple documents\n",
"    docs = [\n",
"        \"Paris is the capital of France\",\n",
"        \"Berlin is the capital of Germany\",\n",
"        \"Rome is the capital of Italy\",\n",
"    ]\n",
"    docs_result = await embeddings.aembed_documents(docs)\n",
"    print(f\"Async document embeddings count: {len(docs_result)}\")\n",
"\n",
"\n",
"await generate_embeddings_async()"
]
},
{
"cell_type": "markdown",
"id": "207a7966",
"metadata": {},
"source": [
"### Document Similarity Example"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8bdb003b",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Document Similarity Matrix:\n",
"Document 1: ['1.0000', '0.6005', '0.3542', '0.5788']\n",
"Document 2: ['0.6005', '1.0000', '0.4154', '0.6170']\n",
"Document 3: ['0.3542', '0.4154', '1.0000', '0.3528']\n",
"Document 4: ['0.5788', '0.6170', '0.3528', '1.0000']\n"
]
}
],
"source": [
"import numpy as np\n",
"from scipy.spatial.distance import cosine\n",
"\n",
"# Create some documents\n",
"documents = [\n",
"    \"Machine learning algorithms build mathematical models based on sample data\",\n",
"    \"Deep learning uses neural networks with many layers\",\n",
"    \"Climate change is a major global environmental challenge\",\n",
"    \"Neural networks are inspired by the human brain's structure\",\n",
"]\n",
"\n",
"# Embed the documents\n",
"embeddings_list = embeddings.embed_documents(documents)\n",
"\n",
"\n",
"# Function to calculate similarity\n",
"def calculate_similarity(embedding1, embedding2):\n",
"    return 1 - cosine(embedding1, embedding2)\n",
"\n",
"\n",
"# Print similarity matrix\n",
"print(\"Document Similarity Matrix:\")\n",
"for i, emb_i in enumerate(embeddings_list):\n",
"    similarities = []\n",
"    for j, emb_j in enumerate(embeddings_list):\n",
"        similarity = calculate_similarity(emb_i, emb_j)\n",
"        similarities.append(f\"{similarity:.4f}\")\n",
"    print(f\"Document {i + 1}: {similarities}\")"
]
},
{
"cell_type": "markdown",
"id": "98785c12",
"metadata": {},
"source": [
"## API Reference\n",
"\n",
"For more details about the GreenNode Serverless AI API, visit the [GreenNode Serverless AI Documentation](https://aiplatform.console.greennode.ai/api-docs/maas).\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "tradingagents",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.5"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -95,35 +95,36 @@
"id": "92019ef1-5d30-4985-b4e6-c0d98bdfe265",
"metadata": {},
"source": [
"## Hugging Face Inference API\n",
"We can also access embedding models via the Hugging Face Inference API, which does not require us to install ``sentence_transformers`` and download models locally."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "66f5c6ba-1446-43e1-b012-800d17cef300",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Enter your HF Inference API Key:\n",
"\n",
" ········\n"
]
}
],
"source": [
"import getpass\n",
"## Hugging Face Inference Providers\n",
"\n",
"inference_api_key = getpass.getpass(\"Enter your HF Inference API Key:\\n\\n\")"
"We can also access embedding models via the [Inference Providers](https://huggingface.co/docs/inference-providers), which lets us use open source models on scalable serverless infrastructure.\n",
"\n",
"First, we need to get a read-only API key from [Hugging Face](https://huggingface.co/settings/tokens).\n"
]
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": null,
"id": "c5576a6c",
"metadata": {},
"outputs": [],
"source": [
"from getpass import getpass\n",
"\n",
"huggingfacehub_api_token = getpass()"
]
},
{
"cell_type": "markdown",
"id": "3ad10337",
"metadata": {},
"source": [
"Now we can use the `HuggingFaceInferenceAPIEmbeddings` class to run open source embedding models via [Inference Providers](https://huggingface.co/docs/inference-providers)."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d0623c1f-cd82-4862-9bce-3655cb9b66ac",
"metadata": {},
"outputs": [
@@ -139,10 +140,11 @@
}
],
"source": [
"from langchain_community.embeddings import HuggingFaceInferenceAPIEmbeddings\n",
"from langchain_huggingface import HuggingFaceInferenceAPIEmbeddings\n",
"\n",
"embeddings = HuggingFaceInferenceAPIEmbeddings(\n",
"    api_key=inference_api_key, model_name=\"sentence-transformers/all-MiniLM-l6-v2\"\n",
"    api_key=huggingfacehub_api_token,\n",
"    model_name=\"sentence-transformers/all-MiniLM-l6-v2\",\n",
")\n",
"\n",
"query_result = embeddings.embed_query(text)\n",

@@ -713,7 +713,7 @@
"`qdr:d3` (past 3 days)\n",
"`qdr:w2` (past 2 weeks)\n",
"`qdr:m6` (past 6 months)\n",
"`qdr:m2` (past 2 years)\n",
"`qdr:y2` (past 2 years)\n",
"\n",
"For all supported filters simply go to [Google Search](https://google.com), search for something, click on \"Tools\", add your date filter and check the URL for \"tbs=\".\n"
]

@@ -28,18 +28,7 @@
"execution_count": 1,
"id": "bec8d532-fec7-4dc7-9be3-020aa7bdb01f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m24.1.2\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m25.0.1\u001b[0m\n",
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n",
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"outputs": [],
"source": [
"pip install -qU langchain-couchbase"
]
@@ -61,7 +50,7 @@
"metadata": {},
"outputs": [
{
"name": "stdin",
"name": "stdout",
"output_type": "stream",
"text": [
"Enter the connection string for the Couchbase cluster: ········\n",
@@ -278,16 +267,16 @@
{
"data": {
"text/plain": [
"['4a6b5252-24ca-4e48-97a9-c33211fc7736',\n",
" '594a413d-761a-44f1-8f0c-6418700b198d',\n",
" 'fdd8461c-f4e3-4c85-af8e-7782ce4d2311',\n",
" '3f6a82b2-7464-4eee-b209-cbca5a236a8a',\n",
" 'df8b87ad-464e-4f83-a007-ccf5a8fa4ff5',\n",
" 'aa18502e-6fb4-4578-9c63-b9a299259b01',\n",
" '8c55a17d-5fa7-4c30-a55d-7ded0d39bf46',\n",
" '41b68c5a-ebf5-4d7a-a079-5e32926ca484',\n",
" '146ac3e0-474a-422a-b0ac-c9fee718396b',\n",
" 'e44941e9-fb3a-4090-88a0-9ffecee3e80e']"
"['f125b836-f555-4449-98dc-cbda4e77ae3f',\n",
" 'a28fccde-fd32-4775-9ca8-6cdb22ca7031',\n",
" 'b1037c4b-947f-497f-84db-63a4def5080b',\n",
" 'c7082b74-b385-4c4b-bbe5-0740909c01db',\n",
" 'a7e31f62-13a5-4109-b881-8631aff7d46c',\n",
" '9fcc2894-fdb1-41bd-9a93-8547747650f4',\n",
" 'a5b0632d-abaf-4802-99b3-df6b6c99be29',\n",
" '0475592e-4b7f-425d-91fd-ac2459d48a36',\n",
" '94c6db4e-ba07-43ff-aa96-3a5d577db43a',\n",
" 'd21c7feb-ad47-4e7d-84c5-785afb189160']"
]
},
"execution_count": 8,
@@ -456,7 +445,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"* [SIM=0.553145] The weather forecast for tomorrow is cloudy and overcast, with a high of 62 degrees. [{'source': 'news'}]\n"
"* [SIM=0.553112] The weather forecast for tomorrow is cloudy and overcast, with a high of 62 degrees. [{'source': 'news'}]\n"
]
}
],
@@ -466,6 +455,52 @@
"    print(f\"* [SIM={score:3f}] {res.page_content} [{res.metadata}]\")"
]
},
{
"cell_type": "markdown",
"id": "faa8ed12-989a-4cd4-90bf-6156f242f008",
"metadata": {},
"source": [
"### Filtering Results\n",
"\n",
"You can filter the search results by specifying any filter on the text or metadata in the document that is supported by the Couchbase Search service.\n",
"\n",
"The `filter` can be any valid [SearchQuery](https://docs.couchbase.com/python-sdk/current/howtos/full-text-searching-with-sdk.html#search-queries) supported by the Couchbase Python SDK. These filters are applied before the Vector Search is performed.\n",
"\n",
"If you want to filter on one of the fields in the metadata, you need to specify it using the `.` (dot) notation.\n",
"\n",
"For example, to fetch the `source` field in the metadata, you need to specify `metadata.source`.\n",
"\n",
"Note that the filter needs to be supported by the Search Index."
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "b1c4d3ac-e3d2-4cba-b765-954bf45357aa",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* The stock market is down 500 points today due to fears of a recession. [{'source': 'news'}] 0.3873019218444824\n",
"* Robbers broke into the city bank and stole $1 million in cash. [{'source': 'news'}] 0.20637212693691254\n",
"* The weather forecast for tomorrow is cloudy and overcast, with a high of 62 degrees. [{'source': 'news'}] 0.10404900461435318\n"
]
}
],
"source": [
"from couchbase import search\n",
"\n",
"query = \"Are there any concerning financial news?\"\n",
"filter_on_source = search.MatchQuery(\"news\", field=\"metadata.source\")\n",
"results = vector_store.similarity_search_with_score(\n",
"    query, fields=[\"metadata.source\"], filter=filter_on_source, k=5\n",
")\n",
"for res, score in results:\n",
"    print(f\"* {res.page_content} [{res.metadata}] {score}\")"
]
|
||||
},
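Because `filter` accepts any `SearchQuery` object, filters are not limited to text matches. The sketch below is illustrative only: it assumes the `NumericRangeQuery` and `ConjunctionQuery` classes from the Couchbase Python SDK's `search` module (check your SDK version for the exact signatures) and reuses the `vector_store` built earlier in this guide.

```python
from couchbase import search

# Hedged sketch: pre-filter to "news" documents whose metadata.rating lies
# between 3 and 5 before the vector search runs. ConjunctionQuery ANDs the
# two sub-queries together.
rating_filter = search.NumericRangeQuery(min=3, max=5, field="metadata.rating")
source_filter = search.MatchQuery("news", field="metadata.source")
combined_filter = search.ConjunctionQuery(rating_filter, source_filter)

results = vector_store.similarity_search_with_score(
    "Any concerning financial news?", filter=combined_filter, k=5
)
```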
  {
   "cell_type": "markdown",
   "id": "9983e83d-efd0-4b75-80db-150e0694e822",
@@ -484,7 +519,7 @@
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "execution_count": 13,
   "id": "ffa743dc-4e89-405b-ad71-7390338889e6",
   "metadata": {},
   "outputs": [
@@ -502,6 +537,44 @@
    "print(results[0])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2e01eb05-77fc-49f8-a552-8af3c5d4460c",
   "metadata": {},
   "source": [
    "### Query by turning into retriever\n",
    "\n",
    "You can also transform the vector store into a retriever for easier usage in your chains. \n",
    "\n",
    "Here is how to transform your vector store into a retriever and then invoke the retriever with a simple query and filter."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "3666265a",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[Document(id='c7082b74-b385-4c4b-bbe5-0740909c01db', metadata={'source': 'news'}, page_content='Robbers broke into the city bank and stole $1 million in cash.')]"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "retriever = vector_store.as_retriever(\n",
    "    search_type=\"similarity\",\n",
    "    search_kwargs={\"k\": 1, \"score_threshold\": 0.5},\n",
    ")\n",
    "filter_on_source = search.MatchQuery(\"news\", field=\"metadata.source\")\n",
    "retriever.invoke(\"Stealing from the bank is a crime\", filter=filter_on_source)"
   ]
  },
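Once the store is a retriever it composes with the rest of LangChain. The following is a minimal LCEL sketch, not part of the notebook: it assumes a chat model bound to the placeholder name `llm`, and the prompt text and `format_docs` helper are invented for illustration.

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough

prompt = ChatPromptTemplate.from_template(
    "Answer using only this context:\n{context}\n\nQuestion: {question}"
)


def format_docs(docs):
    # Join the retrieved documents into a single context string.
    return "\n\n".join(doc.page_content for doc in docs)


chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | llm  # any LangChain chat model; a placeholder here
    | StrOutputParser()
)

chain.invoke("What happened at the city bank?")
```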
  {
   "cell_type": "markdown",
   "id": "a5e45eb2-aa97-45df-bcc5-410e9626e506",
@@ -529,7 +602,7 @@
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "execution_count": 15,
   "id": "7d2e607d-6bbc-4cef-83e3-b6a28bb269ea",
   "metadata": {},
   "outputs": [
@@ -574,7 +647,7 @@
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "execution_count": 16,
   "id": "dc06ba4a-8a6b-4c55-bb69-95cd92db273f",
   "metadata": {},
   "outputs": [
@@ -611,7 +684,7 @@
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "execution_count": 17,
   "id": "fd4749e6-ef4f-4cb5-95ff-37c4fa8283d8",
   "metadata": {},
   "outputs": [
@@ -648,7 +721,7 @@
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "execution_count": 18,
   "id": "b7b47e7d-c32f-4999-bce9-3c3c3cebffd0",
   "metadata": {},
   "outputs": [
@@ -656,13 +729,15 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "page_content='We are cutting off Russia’s largest banks from the international financial system. \n",
      "page_content='And with 75% of adult Americans fully vaccinated and hospitalizations down by 77%, most Americans can remove their masks, return to work, stay in the classroom, and move forward safely. \n",
      "\n",
      "Preventing Russia’s central bank from defending the Russian Ruble making Putin’s $630 Billion “war fund” worthless. \n",
      "We achieved this because we provided free vaccines, treatments, tests, and masks. \n",
      "\n",
      "We are choking off Russia’s access to technology that will sap its economic strength and weaken its military for years to come. \n",
      "Of course, continuing this costs money. \n",
      "\n",
      "Tonight I say to the Russian oligarchs and corrupt leaders who have bilked billions of dollars off this violent regime no more.' metadata={'author': 'Jane Doe', 'date': '2017-01-01', 'rating': 3, 'source': '../../how_to/state_of_the_union.txt'}\n"
      "I will soon send Congress a request. \n",
      "\n",
      "The vast majority of Americans have used these tools and may want to again, so I expect Congress to pass it quickly.' metadata={'author': 'Jane Doe', 'date': '2017-01-01', 'rating': 3, 'source': '../../how_to/state_of_the_union.txt'}\n"
     ]
    }
   ],
@@ -694,7 +769,7 @@
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "execution_count": 19,
   "id": "7e8bf7c5-07d1-4c3f-86d7-1fa3a454dc7f",
   "metadata": {},
   "outputs": [
@@ -702,7 +777,7 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(Document(id='8616f24425b94a52af3d32d20e6ffb4b', metadata={'author': 'John Doe', 'date': '2014-01-01', 'rating': 5, 'source': '../../how_to/state_of_the_union.txt'}, page_content='In this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. \\n\\nWe have fought for freedom, expanded liberty, defeated totalitarianism and terror. \\n\\nAnd built the strongest, freest, and most prosperous nation the world has ever known. \\n\\nNow is the hour. \\n\\nOur moment of responsibility. \\n\\nOur test of resolve and conscience, of history itself.'), 0.361933544533826)\n"
      "(Document(id='3a90405c0f5b4c09a6646259678f1f61', metadata={'author': 'John Doe', 'date': '2014-01-01', 'rating': 5, 'source': '../../how_to/state_of_the_union.txt'}, page_content='In this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. \\n\\nWe have fought for freedom, expanded liberty, defeated totalitarianism and terror. \\n\\nAnd built the strongest, freest, and most prosperous nation the world has ever known. \\n\\nNow is the hour. \\n\\nOur moment of responsibility. \\n\\nOur test of resolve and conscience, of history itself.'), 0.3573387440020518)\n"
     ]
    }
   ],
@@ -736,7 +811,7 @@
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "execution_count": 20,
   "id": "dd0fe7f1-aa40-4c6f-889b-99ad5efcd88b",
   "metadata": {},
   "outputs": [
@@ -744,7 +819,7 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(Document(id='d9b36ef70b8942dda4db63563f51cf0f', metadata={'author': 'Jane Doe', 'date': '2017-01-01', 'rating': 3, 'source': '../../how_to/state_of_the_union.txt'}, page_content='We are cutting off Russia’s largest banks from the international financial system. \\n\\nPreventing Russia’s central bank from defending the Russian Ruble making Putin’s $630 Billion “war fund” worthless. \\n\\nWe are choking off Russia’s access to technology that will sap its economic strength and weaken its military for years to come. \\n\\nTonight I say to the Russian oligarchs and corrupt leaders who have bilked billions of dollars off this violent regime no more.'), 0.7107075545629284)\n"
      "(Document(id='7115a704877a46ad94d661dd9c81cbc3', metadata={'author': 'Jane Doe', 'date': '2017-01-01', 'rating': 3, 'source': '../../how_to/state_of_the_union.txt'}, page_content='And with 75% of adult Americans fully vaccinated and hospitalizations down by 77%, most Americans can remove their masks, return to work, stay in the classroom, and move forward safely. \\n\\nWe achieved this because we provided free vaccines, treatments, tests, and masks. \\n\\nOf course, continuing this costs money. \\n\\nI will soon send Congress a request. \\n\\nThe vast majority of Americans have used these tools and may want to again, so I expect Congress to pass it quickly.'), 0.6898253780130769)\n"
     ]
    }
   ],
@@ -764,6 +839,64 @@
    "print(results[0])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "65f9a24e-8c67-42e9-b995-6b4137da8c36",
   "metadata": {},
   "source": [
    "**Note** \n",
    "\n",
    "The hybrid search results might contain documents that do not satisfy all the search parameters. This is due to the way the [scoring is calculated](https://docs.couchbase.com/server/current/search/run-searches.html#scoring). \n",
    "The score is the sum of the vector search score and the scores of the queries in the hybrid search. If the vector search score is high, the combined score can exceed that of results that match all the queries in the hybrid search. \n",
    "To avoid such results, use the `filter` parameter instead of hybrid search."
   ]
  },
  {
   "cell_type": "markdown",
   "id": "334de7ac-8fd1-42b1-856e-834508af8738",
   "metadata": {},
   "source": [
    "### Combining Hybrid Search Query with Filters\n",
    "Hybrid search can be combined with filters, so that the results both score well on the hybrid search queries and satisfy the filter requirements.\n",
    "\n",
    "In this example, we are checking for documents with a rating between 3 & 5 and matching the string \"independence\" in the text field."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "a360adba-03d2-4e25-877e-438538d2ea37",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(Document(id='23bb51b4e4d54a94ab0a95e72be8428c', metadata={'author': 'John Doe', 'date': '2012-01-01', 'rating': 3, 'source': '../../how_to/state_of_the_union.txt'}, page_content='And we remain clear-eyed. The Ukrainians are fighting back with pure courage. But the next few days weeks, months, will be hard on them. \\n\\nPutin has unleashed violence and chaos. But while he may make gains on the battlefield – he will pay a continuing high price over the long run. \\n\\nAnd a proud Ukrainian people, who have known 30 years of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards.'), 0.30549919644400614)\n"
     ]
    }
   ],
   "source": [
    "filter_text = search.MatchQuery(\"independence\", field=\"text\")\n",
    "\n",
    "query = \"Any mention about independence?\"\n",
    "results = vector_store.similarity_search_with_score(\n",
    "    query,\n",
    "    search_options={\n",
    "        \"query\": {\n",
    "            \"min\": 3,\n",
    "            \"max\": 5,\n",
    "            \"inclusive_min\": True,\n",
    "            \"inclusive_max\": True,\n",
    "            \"field\": \"metadata.rating\",\n",
    "        }\n",
    "    },\n",
    "    filter=filter_text,\n",
    ")\n",
    "\n",
    "print(results[0])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "39258571-3233-45c3-a6ad-5c3c90ea2b1c",
@@ -776,43 +909,6 @@
    "- [Couchbase Server](https://docs.couchbase.com/server/current/search/search-request-params.html#query-object)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "db0a1d74",
   "metadata": {},
   "source": [
    "### Query by turning into retriever\n",
    "\n",
    "You can also transform the vector store into a retriever for easier usage in your chains. \n",
    "\n",
    "Here is how to transform your vector store into a retriever and then invoke the retriever with a simple query and filter."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "3666265a",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[Document(id='3f6a82b2-7464-4eee-b209-cbca5a236a8a', metadata={'source': 'news'}, page_content='Robbers broke into the city bank and stole $1 million in cash.')]"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "retriever = vector_store.as_retriever(\n",
    "    search_type=\"similarity\",\n",
    "    search_kwargs={\"k\": 1, \"score_threshold\": 0.5},\n",
    ")\n",
    "retriever.invoke(\"Stealing from the bank is a crime\", filter={\"source\": \"news\"})"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "28ab35ec",
@@ -832,7 +928,7 @@
   "id": "80958c2b-6a67-45e6-b7f0-fd2461d75e0f",
   "metadata": {},
   "source": [
    "# Frequently Asked Questions"
    "## Frequently Asked Questions"
   ]
  },
  {
@@ -840,8 +936,8 @@
   "id": "4f7f9838-cc20-44bc-a72d-06f2cb6c3fca",
   "metadata": {},
   "source": [
    "## Question: Should I create the Search index before creating the CouchbaseVectorStore object?\n",
    "Yes, currently you need to create the Search index before creating the `CouchbaseVectorStore` object.\n"
    "### Question: Should I create the Search index before creating the CouchbaseSearchVectorStore object?\n",
    "Yes, currently you need to create the Search index before creating the `CouchbaseSearchVectorStore` object.\n"
   ]
  },
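One hedged way to satisfy this requirement is to import the index definition programmatically before instantiating the store. The sketch below assumes an index definition exported from the Couchbase UI as a hypothetical `vector_index.json`, placeholder bucket/scope names, and the scope-level `SearchIndexManager` described in the Couchbase Python SDK docs; verify the class and method names against your SDK version.

```python
import json

from couchbase.management.search import SearchIndex

# Hypothetical: an index definition exported from the Couchbase UI.
with open("vector_index.json") as f:
    index_definition = json.load(f)

scope = cluster.bucket("my_bucket").scope("my_scope")  # placeholders
index_manager = scope.search_indexes()

# Create (or update) the Search index before building the vector store.
index_manager.upsert_index(
    SearchIndex(
        name=index_definition["name"],
        source_name="my_bucket",
        params=index_definition["params"],
    )
)
```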
  {
@@ -849,7 +945,7 @@
   "id": "3f0dbc1b-9e82-4ec3-9330-6b54de00661e",
   "metadata": {},
   "source": [
    "## Question: I am not seeing all the fields that I specified in my search results. \n",
    "### Question: I am not seeing all the fields that I specified in my search results. \n",
    "\n",
    "In Couchbase, we can only return the fields stored in the Search index. Please ensure that the field that you are trying to access in the search results is part of the Search index.\n",
    "\n",
@@ -865,10 +961,10 @@
  },
  {
   "cell_type": "markdown",
   "id": "3702977a-2e25-48b6-b662-edd5cb94cdec",
   "id": "0449a2e3-59d7-4b25-b09e-a2b062fef01f",
   "metadata": {},
   "source": [
    "## Question: I am unable to see the metadata object in my search results. \n",
    "### Question: I am unable to see the metadata object in my search results. \n",
    "This is most likely due to the `metadata` field in the document not being indexed and/or stored by the Couchbase Search index. In order to index the `metadata` field in the document, you need to add it to the index as a child mapping. \n",
    "\n",
    "If you select to map all the fields in the mapping, you will be able to search by all metadata fields. Alternatively, to optimize the index, you can select the specific fields inside the `metadata` object to be indexed. You can refer to the [docs](https://docs.couchbase.com/cloud/search/customize-index.html) to learn more about indexing child mappings.\n",
@@ -879,6 +975,19 @@
    "* [Couchbase Server](https://docs.couchbase.com/server/current/search/create-child-mapping.html)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c9b8632e-9bce-41c8-b6aa-e527b41de9b2",
   "metadata": {},
   "source": [
    "### Question: What is the difference between filter and search_options / hybrid queries? \n",
    "Filters are [pre-filters](https://docs.couchbase.com/server/current/vector-search/pre-filtering-vector-search.html#about-pre-filtering) that are used to restrict the documents searched in a Search index. They are available in Couchbase Server 7.6.4 and higher.\n",
    "\n",
    "Hybrid queries are additional search queries that can be used to tune the results being returned from the search index. \n",
    "\n",
    "Both filters and hybrid search queries have the same capabilities with slightly different syntax. Filters are [SearchQuery](https://docs.couchbase.com/python-sdk/current/howtos/full-text-searching-with-sdk.html#search-queries) objects while the hybrid search queries are [dictionaries](https://docs.couchbase.com/server/current/search/search-request-params.html).\n"
   ]
  },
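To make the distinction concrete, here is a minimal sketch, reusing the example data from above, of the same restriction expressed both ways. The dictionary shape for `search_options` follows the Couchbase search request parameters linked above; treat it as illustrative rather than exhaustive.

```python
from couchbase import search

# 1) Pre-filter: a SearchQuery object. Documents are restricted *before*
#    the vector search runs (Couchbase Server 7.6.4+).
pre_filtered = vector_store.similarity_search(
    "bank robbery",
    filter=search.MatchQuery("news", field="metadata.source"),
)

# 2) Hybrid query: a dictionary of search request parameters. The text match
#    is scored *together with* the vector similarity instead of beforehand.
hybrid = vector_store.similarity_search(
    "bank robbery",
    search_options={"query": {"field": "metadata.source", "match": "news"}},
)
```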
  {
   "cell_type": "markdown",
   "id": "d876b769",
@@ -886,7 +995,7 @@
   "source": [
    "## API reference\n",
    "\n",
    "For detailed documentation of all `CouchbaseSearchVectorStore` features and configurations head to the API reference: https://couchbase-ecosystem.github.io/langchain-couchbase/langchain_couchbase.html "
    "For detailed documentation of all `CouchbaseSearchVectorStore` features and configurations, head to the [API reference](https://couchbase-ecosystem.github.io/langchain-couchbase/langchain_couchbase.html#module-langchain_couchbase.vectorstores.search_vector_store)"
   ]
  }
 ],

@@ -249,6 +249,20 @@
    "tools = [search]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ecbc86d8",
   "metadata": {},
   "source": [
    ":::tip\n",
    "\n",
    "In many applications, you may want to define custom tools. LangChain supports custom\n",
    "tool creation via Python functions and other means. Refer to the\n",
    "[How to create tools](/docs/how_to/custom_tools/) guide for details.\n",
    "\n",
    ":::"
   ]
  },
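For reference, a custom tool can be as small as a decorated Python function. The sketch below uses the real `@tool` decorator from `langchain_core.tools`; the `get_word_length` function itself is a made-up example, not part of this notebook.

```python
from langchain_core.tools import tool


@tool
def get_word_length(word: str) -> int:
    """Return the number of characters in a word."""
    return len(word)


# Custom tools can be passed to an agent alongside built-in ones.
tools = [get_word_length]
```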
  {
   "cell_type": "markdown",
   "id": "e00068b0",

@@ -253,6 +253,10 @@ const config = {
        {
          title: "Community",
          items: [
            {
              label: "LangChain Forum",
              href: "https://forum.langchain.com/",
            },
            {
              label: "Twitter",
              href: "https://twitter.com/LangChainAI",

@@ -32,6 +32,7 @@ NOTEBOOKS_NO_EXECUTION = [
    "docs/docs/how_to/migrate_agent.ipynb",  # TODO: resolve issue with asyncio / exception handling
    "docs/docs/how_to/qa_per_user.ipynb",  # Requires Pinecone instance
    "docs/docs/how_to/query_high_cardinality.ipynb",  # Heavy
    "docs/docs/how_to/response_metadata.ipynb",  # Auth is annoying
    "docs/docs/how_to/split_by_token.ipynb",  # TODO: requires Korean document, also heavy deps
    "docs/docs/how_to/tools_error.ipynb",  # Deliberately raises error
    "docs/docs/how_to/tools_human.ipynb",  # Requires human input()

@@ -1038,17 +1038,17 @@ const FEATURE_TABLES = {
      idsInAddDocuments: false,
    },
    {
      name: "CouchbaseVectorStore",
      name: "CouchbaseSearchVectorStore",
      link: "couchbase",
      deleteById: true,
      filtering: true,
      searchByVector: false,
      searchByVector: true,
      searchWithScore: true,
      async: true,
      passesStandardTests: false,
      multiTenancy: false,
      multiTenancy: true,
      local: true,
      idsInAddDocuments: false,
      idsInAddDocuments: true,
    },
    {
      name: "DatabricksVectorSearch",
@@ -1176,9 +1176,9 @@
      searchWithScore: true,
      async: true,
      passesStandardTests: false,
      multiTenancy: false,
      multiTenancy: true,
      local: true,
      idsInAddDocuments: false,
      idsInAddDocuments: true,
    },
    {
      name: "Redis",

@@ -22,7 +22,7 @@ lint lint_diff lint_package lint_tests:

format format_diff:
	[ "$(PYTHON_FILES)" = "" ] || uv run --group typing --group lint ruff format $(PYTHON_FILES)
	[ "$(PYTHON_FILES)" = "" ] || uv run --group typing --group lint ruff check --select I --fix $(PYTHON_FILES)
	[ "$(PYTHON_FILES)" = "" ] || uv run --group typing --group lint ruff check --fix $(PYTHON_FILES)

test tests: _test _e2e_test

@@ -1,7 +1,6 @@
from typing import Optional
from typing import Annotated, Optional

import typer
from typing_extensions import Annotated

from langchain_cli._version import __version__
from langchain_cli.namespaces import app as app_namespace
@@ -12,7 +11,9 @@ from langchain_cli.utils.packages import get_langserve_export, get_package_root

app = typer.Typer(no_args_is_help=True, add_completion=False)
app.add_typer(
    template_namespace.package_cli, name="template", help=template_namespace.__doc__
    template_namespace.package_cli,
    name="template",
    help=template_namespace.__doc__,
)
app.add_typer(app_namespace.app_cli, name="app", help=app_namespace.__doc__)
app.add_typer(
@@ -33,23 +34,23 @@ app.command(
)


def version_callback(show_version: bool) -> None:
def version_callback(show_version: bool) -> None:  # noqa: FBT001
    if show_version:
        typer.echo(f"langchain-cli {__version__}")
        raise typer.Exit()
        raise typer.Exit


@app.callback()
def main(
    version: bool = typer.Option(
        False,
    version: bool = typer.Option(  # noqa: FBT001
        False,  # noqa: FBT003
        "--version",
        "-v",
        help="Print the current CLI version.",
        callback=version_callback,
        is_eager=True,
    ),
):
) -> None:
    pass


@@ -57,16 +58,15 @@
def serve(
    *,
    port: Annotated[
        Optional[int], typer.Option(help="The port to run the server on")
        Optional[int],
        typer.Option(help="The port to run the server on"),
    ] = None,
    host: Annotated[
        Optional[str], typer.Option(help="The host to run the server on")
        Optional[str],
        typer.Option(help="The host to run the server on"),
    ] = None,
) -> None:
    """
    Start the LangServe app, whether it's a template or an app.
    """

    """Start the LangServe app, whether it's a template or an app."""
    # see if is a template
    try:
        project_dir = get_package_root()

@@ -1,9 +1,7 @@
# type: ignore
"""
Development Scripts for template packages
"""
"""Development Scripts for template packages."""

from typing import Sequence
from collections.abc import Sequence
from typing import Literal

from fastapi import FastAPI
from langserve import add_routes
@@ -14,11 +12,9 @@ from langchain_cli.utils.packages import get_langserve_export, get_package_root
def create_demo_server(
    *,
    config_keys: Sequence[str] = (),
    playground_type: str = "default",
    playground_type: Literal["default", "chat"] = "default",
):
    """
    Creates a demo server for the current template.
    """
    """Create a demo server for the current template."""
    app = FastAPI()
    package_root = get_package_root()
    pyproject = package_root / "pyproject.toml"
@@ -35,9 +31,11 @@ def create_demo_server(
            playground_type=playground_type,
        )
    except KeyError as e:
        raise KeyError("Missing fields from pyproject.toml") from e
        msg = "Missing fields from pyproject.toml"
        raise KeyError(msg) from e
    except ImportError as e:
        raise ImportError("Could not import module defined in pyproject.toml") from e
        msg = "Could not import module defined in pyproject.toml"
        raise ImportError(msg) from e

    return app

@@ -39,7 +39,7 @@ lint lint_diff lint_package lint_tests:

format format_diff:
	[ "$(PYTHON_FILES)" = "" ] || poetry run ruff format $(PYTHON_FILES)
	[ "$(PYTHON_FILES)" = "" ] || poetry run ruff check --select I --fix $(PYTHON_FILES)
	[ "$(PYTHON_FILES)" = "" ] || poetry run ruff check --fix $(PYTHON_FILES)

spell_check:
	poetry run codespell --toml pyproject.toml

@@ -1,16 +1,13 @@
"""
Manage LangChain apps
"""
"""Manage LangChain apps."""

import shutil
import subprocess
import sys
import warnings
from pathlib import Path
from typing import Dict, List, Optional, Tuple
from typing import Annotated, Optional

import typer
from typing_extensions import Annotated

from langchain_cli.utils.events import create_events
from langchain_cli.utils.git import (
@@ -44,7 +41,7 @@ def new(
    ] = None,
    *,
    package: Annotated[
        Optional[List[str]],
        Optional[list[str]],
        typer.Option(help="Packages to seed the project with"),
    ] = None,
    pip: Annotated[
@@ -63,15 +60,14 @@ def new(
            is_flag=True,
        ),
    ] = False,
):
    """
    Create a new LangServe application.
    """
) -> None:
    """Create a new LangServe application."""
    has_packages = package is not None and len(package) > 0

    if noninteractive:
        if name is None:
            raise typer.BadParameter("name is required when --non-interactive is set")
            msg = "name is required when --non-interactive is set"
            raise typer.BadParameter(msg)
        name_str = name
        pip_bool = bool(pip)  # None should be false
    else:
@@ -83,7 +79,9 @@ def new(
        package_prompt = "What package would you like to add? (leave blank to skip)"
        while True:
            package_str = typer.prompt(
                package_prompt, default="", show_default=False
                package_prompt,
                default="",
                show_default=False,
            )
            if not package_str:
                break
@@ -125,27 +123,33 @@
    typer.echo("Then add templates with commands like:\n")
    typer.echo("    langchain app add extraction-openai-functions")
    typer.echo(
        "    langchain app add git+ssh://git@github.com/efriis/simple-pirate.git\n\n"
        "    langchain app add git+ssh://git@github.com/efriis/simple-pirate.git\n\n",
    )


@app_cli.command()
def add(
    dependencies: Annotated[
        Optional[List[str]], typer.Argument(help="The dependency to add")
        Optional[list[str]],
        typer.Argument(help="The dependency to add"),
    ] = None,
    *,
    api_path: Annotated[List[str], typer.Option(help="API paths to add")] = [],
    api_path: Annotated[
        Optional[list[str]],
        typer.Option(help="API paths to add"),
    ] = None,
    project_dir: Annotated[
        Optional[Path], typer.Option(help="The project directory")
        Optional[Path],
        typer.Option(help="The project directory"),
    ] = None,
    repo: Annotated[
        List[str],
        Optional[list[str]],
        typer.Option(help="Install templates from a specific github repo instead"),
    ] = [],
    ] = None,
    branch: Annotated[
        List[str], typer.Option(help="Install templates from a specific branch")
    ] = [],
        Optional[list[str]],
        typer.Option(help="Install templates from a specific branch"),
    ] = None,
    pip: Annotated[
        bool,
        typer.Option(
@@ -155,19 +159,24 @@ def add(
            prompt="Would you like to `pip install -e` the template(s)?",
        ),
    ],
):
    """
    Adds the specified template to the current LangServe app.
) -> None:
    """Add the specified template to the current LangServe app.

    e.g.:
    langchain app add extraction-openai-functions
    langchain app add git+ssh://git@github.com/efriis/simple-pirate.git
    """

    if branch is None:
        branch = []
    if repo is None:
        repo = []
    if api_path is None:
        api_path = []
    if not branch and not repo:
        warnings.warn(
            "Adding templates from the default branch and repo is deprecated."
            " At a minimum, you will have to add `--branch v0.2` for this to work"
            " At a minimum, you will have to add `--branch v0.2` for this to work",
            stacklevel=2,
        )

    parsed_deps = parse_dependencies(dependencies, repo, branch, api_path)
@@ -177,20 +186,20 @@ def add(
    package_dir = project_root / "packages"

    create_events(
        [{"event": "serve add", "properties": dict(parsed_dep=d)} for d in parsed_deps]
        [{"event": "serve add", "properties": {"parsed_dep": d}} for d in parsed_deps],
    )

    # group by repo/ref
    grouped: Dict[Tuple[str, Optional[str]], List[DependencySource]] = {}
    grouped: dict[tuple[str, Optional[str]], list[DependencySource]] = {}
    for dep in parsed_deps:
        key_tup = (dep["git"], dep["ref"])
        lst = grouped.get(key_tup, [])
        lst.append(dep)
        grouped[key_tup] = lst

    installed_destination_paths: List[Path] = []
    installed_destination_names: List[str] = []
    installed_exports: List[LangServeExport] = []
    installed_destination_paths: list[Path] = []
    installed_destination_names: list[str] = []
    installed_exports: list[LangServeExport] = []

    for (git, ref), group_deps in grouped.items():
        if len(group_deps) == 1:
@@ -217,7 +226,7 @@ def add(
            destination_path = package_dir / inner_api_path
            if destination_path.exists():
                typer.echo(
                    f"Folder {str(inner_api_path)} already exists. " "Skipping...",
                    f"Folder {inner_api_path} already exists. Skipping...",
                )
                continue
            copy_repo(source_path, destination_path)
@@ -249,14 +258,14 @@ def add(
            typer.echo("Failed to print install command, continuing...")
    else:
        if pip:
            cmd = ["pip", "install", "-e"] + installed_destination_strs
            cmd = ["pip", "install", "-e", *installed_destination_strs]
            cmd_str = " \\\n  ".join(installed_destination_strs)
            typer.echo(f"Running: pip install -e \\\n  {cmd_str}")
            subprocess.run(cmd, cwd=cwd)
            subprocess.run(cmd, cwd=cwd)  # noqa: S603

    chain_names = []
    for e in installed_exports:
        original_candidate = f'{e["package_name"].replace("-", "_")}_chain'
        original_candidate = f"{e['package_name'].replace('-', '_')}_chain"
        candidate = original_candidate
        i = 2
        while candidate in chain_names:
@@ -283,35 +292,35 @@ def add(
        if len(chain_names) == 1
        else f"these {len(chain_names)} templates"
    )
    lines = (
        ["", f"To use {t}, add the following to your app:\n\n```", ""]
        + imports
        + [""]
        + routes
        + ["```"]
    )
    lines = [
        "",
        f"To use {t}, add the following to your app:\n\n```",
        "",
        *imports,
        "",
        *routes,
        "```",
    ]
    typer.echo("\n".join(lines))


@app_cli.command()
def remove(
    api_paths: Annotated[List[str], typer.Argument(help="The API paths to remove")],
    api_paths: Annotated[list[str], typer.Argument(help="The API paths to remove")],
    *,
    project_dir: Annotated[
        Optional[Path], typer.Option(help="The project directory")
        Optional[Path],
        typer.Option(help="The project directory"),
    ] = None,
):
    """
    Removes the specified package from the current LangServe app.
    """

) -> None:
    """Remove the specified package from the current LangServe app."""
    project_root = get_package_root(project_dir)

    project_pyproject = project_root / "pyproject.toml"

    package_root = project_root / "packages"

    remove_deps: List[str] = []
    remove_deps: list[str] = []

    for api_path in api_paths:
        package_dir = package_root / api_path
@@ -325,7 +334,7 @@ def remove(

            shutil.rmtree(package_dir)
            remove_deps.append(api_path)
        except Exception:
        except Exception:  # noqa: S110
            pass

    try:
@@ -339,19 +348,19 @@ def remove(
def serve(
    *,
    port: Annotated[
        Optional[int], typer.Option(help="The port to run the server on")
        Optional[int],
        typer.Option(help="The port to run the server on"),
    ] = None,
    host: Annotated[
        Optional[str], typer.Option(help="The host to run the server on")
        Optional[str],
        typer.Option(help="The host to run the server on"),
    ] = None,
    app: Annotated[
        Optional[str], typer.Option(help="The app to run, e.g. `app.server:app`")
        Optional[str],
        typer.Option(help="The app to run, e.g. `app.server:app`"),
    ] = None,
) -> None:
    """
    Starts the LangServe app.
    """

    """Start the LangServe app."""
    # add current dir as first entry of path
    sys.path.append(str(Path.cwd()))

@@ -361,5 +370,8 @@ def serve(
    import uvicorn

    uvicorn.run(
        app_str, host=host_str, port=port if port is not None else 8000, reload=True
        app_str,
        host=host_str,
        port=port if port is not None else 8000,
        reload=True,
    )

@@ -1,15 +1,13 @@
"""
Develop integration packages for LangChain.
"""
"""Develop integration packages for LangChain."""

import re
import shutil
import subprocess
from pathlib import Path
from typing import Dict, Optional, cast
from typing import Annotated, Optional, cast

import typer
from typing_extensions import Annotated, TypedDict
from typing_extensions import TypedDict

from langchain_cli.utils.find_replace import replace_file, replace_glob

@@ -28,18 +26,20 @@ class Replacements(TypedDict):
def _process_name(name: str, *, community: bool = False) -> Replacements:
    preprocessed = name.replace("_", "-").lower()

    if preprocessed.startswith("langchain-"):
        preprocessed = preprocessed[len("langchain-") :]
    preprocessed = preprocessed.removeprefix("langchain-")

    if not re.match(r"^[a-z][a-z0-9-]*$", preprocessed):
        raise ValueError(
        msg = (
            "Name should only contain lowercase letters (a-z), numbers, and hyphens"
            ", and start with a letter."
        )
        raise ValueError(msg)
    if preprocessed.endswith("-"):
        raise ValueError("Name should not end with `-`.")
        msg = "Name should not end with `-`."
        raise ValueError(msg)
    if preprocessed.find("--") != -1:
        raise ValueError("Name should not contain consecutive hyphens.")
        msg = "Name should not contain consecutive hyphens."
        raise ValueError(msg)
    replacements: Replacements = {
        "__package_name__": f"langchain-{preprocessed}",
        "__module_name__": "langchain_" + preprocessed.replace("-", "_"),
@@ -66,7 +66,7 @@ def new(
        Optional[str],
        typer.Option(
            help="The name of the integration in PascalCase. e.g. `MyIntegration`."
            " This is used to name classes like `MyIntegrationVectorStore`"
            " This is used to name classes like `MyIntegrationVectorStore`",
        ),
    ] = None,
    src: Annotated[
@@ -84,28 +84,26 @@ def new(
            ". e.g. `my-integration/my_integration.py`",
        ),
    ] = None,
):
    """
    Creates a new integration package.
    """

) -> None:
    """Create a new integration package."""
    try:
        replacements = _process_name(name)
    except ValueError as e:
        typer.echo(e)
        raise typer.Exit(code=1)
        raise typer.Exit(code=1) from None

    if name_class:
        if not re.match(r"^[A-Z][a-zA-Z0-9]*$", name_class):
            typer.echo(
                "Name should only contain letters (a-z, A-Z), numbers, and underscores"
                ", and start with a capital letter."
                ", and start with a capital letter.",
            )
            raise typer.Exit(code=1)
        replacements["__ModuleName__"] = name_class
    else:
        replacements["__ModuleName__"] = typer.prompt(
            "Name of integration in PascalCase", default=replacements["__ModuleName__"]
            "Name of integration in PascalCase",
            default=replacements["__ModuleName__"],
        )

    project_template_dir = Path(__file__).parents[1] / "integration_template"
@@ -123,11 +121,11 @@ def new(
        shutil.move(destination_dir / "integration_template", package_dir)

        # replacements in files
        replace_glob(destination_dir, "**/*", cast(Dict[str, str], replacements))
        replace_glob(destination_dir, "**/*", cast("dict[str, str]", replacements))

        # poetry install
        subprocess.run(
            ["poetry", "install", "--with", "lint,test,typing,test_integration"],
            ["poetry", "install", "--with", "lint,test,typing,test_integration"],  # noqa: S607
            cwd=destination_dir,
        )
    else:
@@ -155,7 +153,7 @@ def new(
        if len(dst_paths) != len(set(dst_paths)):
            typer.echo(
                "Duplicate destination paths provided or computed - please "
                "specify them explicitly with --dst."
                "specify them explicitly with --dst.",
            )
            raise typer.Exit(code=1)

@@ -167,7 +165,7 @@ def new(

        for src_path, dst_path in zip(src_paths, dst_paths):
            shutil.copy(src_path, dst_path)
            replace_file(dst_path, cast(Dict[str, str], replacements))
            replace_file(dst_path, cast("dict[str, str]", replacements))


TEMPLATE_MAP: dict[str, str] = {
@@ -183,7 +181,7 @@ TEMPLATE_MAP: dict[str, str] = {
    "Retriever": "retrievers.ipynb",
}

_component_types_str = ", ".join(f"`{k}`" for k in TEMPLATE_MAP.keys())
_component_types_str = ", ".join(f"`{k}`" for k in TEMPLATE_MAP)


@integration_cli.command()
@@ -226,13 +224,11 @@ def create_doc(
            prompt="The relative path to the docs directory to place the new file in.",
        ),
    ] = "docs/docs/integrations/chat/",
):
    """
    Creates a new integration doc.
    """
) -> None:
    """Create a new integration doc."""
    if component_type not in TEMPLATE_MAP:
        typer.echo(
            f"Unrecognized {component_type=}. Expected one of {_component_types_str}."
            f"Unrecognized {component_type=}. Expected one of {_component_types_str}.",
        )
        raise typer.Exit(code=1)

@@ -3,18 +3,20 @@
|
||||
import importlib
|
||||
import inspect
|
||||
import pkgutil
|
||||
from typing import List, Tuple
|
||||
|
||||
|
||||
def generate_raw_migrations(
|
||||
from_package: str, to_package: str, filter_by_all: bool = False
|
||||
) -> List[Tuple[str, str]]:
|
||||
from_package: str,
|
||||
to_package: str,
|
||||
filter_by_all: bool = False, # noqa: FBT001, FBT002
|
||||
) -> list[tuple[str, str]]:
|
||||
"""Scan the `langchain` package and generate migrations for all modules."""
|
||||
package = importlib.import_module(from_package)
|
||||
|
||||
items = []
|
||||
for importer, modname, ispkg in pkgutil.walk_packages(
|
||||
package.__path__, package.__name__ + "."
|
||||
for _importer, modname, _ispkg in pkgutil.walk_packages(
|
||||
package.__path__,
|
||||
package.__name__ + ".",
|
||||
):
|
||||
try:
|
||||
module = importlib.import_module(modname)
|
||||
@@ -35,33 +37,35 @@ def generate_raw_migrations(
|
||||
obj = getattr(module, name, None)
|
||||
except ImportError:
|
||||
continue
|
||||
if obj and (inspect.isclass(obj) or inspect.isfunction(obj)):
|
||||
if obj.__module__.startswith(to_package):
|
||||
items.append(
|
||||
(f"{modname}.{name}", f"{obj.__module__}.{obj.__name__}")
|
||||
)
|
||||
if (
|
||||
obj
|
||||
and (inspect.isclass(obj) or inspect.isfunction(obj))
|
||||
and obj.__module__.startswith(to_package)
|
||||
):
|
||||
items.append(
|
||||
(f"{modname}.{name}", f"{obj.__module__}.{obj.__name__}"),
|
||||
)
|
||||
|
||||
if not filter_by_all:
|
||||
# Iterate over all members of the module
|
||||
for name, obj in inspect.getmembers(module):
|
||||
# Check if it's a class or function
|
||||
if inspect.isclass(obj) or inspect.isfunction(obj):
|
||||
# Check if the module name of the obj starts with
|
||||
# 'langchain_community'
|
||||
if obj.__module__.startswith(to_package):
|
||||
items.append(
|
||||
(f"{modname}.{name}", f"{obj.__module__}.{obj.__name__}")
|
||||
)
|
||||
# Check if the module name of the obj starts with
|
||||
# 'langchain_community'
|
||||
if inspect.isclass(obj) or (
|
||||
inspect.isfunction(obj) and obj.__module__.startswith(to_package)
|
||||
):
|
||||
items.append(
|
||||
(f"{modname}.{name}", f"{obj.__module__}.{obj.__name__}"),
|
||||
)
|
||||
|
||||
return items
|
||||
|
||||
|
||||
def generate_top_level_imports(pkg: str) -> List[Tuple[str, str]]:
|
||||
"""This code will look at all the top level modules in langchain_community.
|
||||
def generate_top_level_imports(pkg: str) -> list[tuple[str, str]]:
|
||||
"""Look at all the top level modules in langchain_community.
|
||||
|
||||
It'll attempt to import everything from each __init__ file
|
||||
|
||||
for example,
|
||||
Attempt to import everything from each ``__init__`` file. For example,
|
||||
|
||||
langchain_community/
|
||||
chat_models/
|
||||
@@ -75,19 +79,19 @@ def generate_top_level_imports(pkg: str) -> List[Tuple[str, str]]:
|
||||
|
||||
Each tuple will contain the fully qualified path of the class / function to where
|
||||
its logic is defined
|
||||
(e.g., langchain_community.chat_models.xyz_implementation.ver2.XYZ)
|
||||
(e.g., ``langchain_community.chat_models.xyz_implementation.ver2.XYZ``)
|
||||
and the second tuple will contain the path
|
||||
to importing it from the top level namespaces
|
||||
(e.g., langchain_community.chat_models.XYZ)
|
||||
(e.g., ``langchain_community.chat_models.XYZ``)
|
||||
"""
|
||||
package = importlib.import_module(pkg)
|
||||
|
||||
items = []
|
||||
|
||||
# Function to handle importing from modules
|
||||
def handle_module(module, module_name):
|
||||
def handle_module(module, module_name) -> None:
|
||||
if hasattr(module, "__all__"):
|
||||
all_objects = getattr(module, "__all__")
|
||||
all_objects = module.__all__
|
||||
for name in all_objects:
|
||||
# Attempt to fetch each object declared in __all__
|
||||
obj = getattr(module, name, None)
|
||||
@@ -99,15 +103,16 @@ def generate_top_level_imports(pkg: str) -> List[Tuple[str, str]]:
|
||||
top_level_import = f"{module_name}.{name}"
|
||||
# Append the tuple with original and top-level paths
|
||||
items.append(
|
||||
(f"{original_module}.{original_name}", top_level_import)
|
||||
(f"{original_module}.{original_name}", top_level_import),
|
||||
)
|
||||
|
||||
# Handle the package itself (root level)
|
||||
handle_module(package, pkg)
|
||||
|
||||
# Only iterate through top-level modules/packages
|
||||
for finder, modname, ispkg in pkgutil.iter_modules(
|
||||
package.__path__, package.__name__ + "."
|
||||
for _finder, modname, ispkg in pkgutil.iter_modules(
|
||||
package.__path__,
|
||||
package.__name__ + ".",
|
||||
):
|
||||
if ispkg:
|
||||
try:
|
||||
@@ -120,14 +125,18 @@ def generate_top_level_imports(pkg: str) -> List[Tuple[str, str]]:
|
||||
|
||||
|
||||
def generate_simplified_migrations(
|
||||
from_package: str, to_package: str, filter_by_all: bool = True
|
||||
) -> List[Tuple[str, str]]:
|
||||
from_package: str,
|
||||
to_package: str,
|
||||
filter_by_all: bool = True, # noqa: FBT001, FBT002
|
||||
) -> list[tuple[str, str]]:
|
||||
"""Get all the raw migrations, then simplify them if possible."""
|
||||
raw_migrations = generate_raw_migrations(
|
||||
from_package, to_package, filter_by_all=filter_by_all
|
||||
from_package,
|
||||
to_package,
|
||||
filter_by_all=filter_by_all,
|
||||
)
|
||||
top_level_simplifications = generate_top_level_imports(to_package)
|
||||
top_level_dict = {full: top_level for full, top_level in top_level_simplifications}
|
||||
top_level_dict = dict(top_level_simplifications)
|
||||
simple_migrations = []
|
||||
for migration in raw_migrations:
|
||||
original, new = migration
|
||||
|
||||
@@ -1,15 +1,11 @@
|
||||
from typing import List, Tuple
|
||||
|
||||
|
||||
def split_package(package: str) -> Tuple[str, str]:
|
||||
"""Split a package name into the containing package and the final name"""
|
||||
def split_package(package: str) -> tuple[str, str]:
|
||||
"""Split a package name into the containing package and the final name."""
|
||||
parts = package.split(".")
|
||||
return ".".join(parts[:-1]), parts[-1]
|
||||
|
||||
|
||||
def dump_migrations_as_grit(name: str, migration_pairs: List[Tuple[str, str]]):
|
||||
def dump_migrations_as_grit(name: str, migration_pairs: list[tuple[str, str]]) -> str:
|
||||
"""Dump the migration pairs as a Grit file."""
|
||||
output = "language python"
|
||||
remapped = ",\n".join(
|
||||
[
|
||||
f"""
|
||||
@@ -21,10 +17,10 @@ def dump_migrations_as_grit(name: str, migration_pairs: List[Tuple[str, str]]):
|
||||
]
|
||||
"""
|
||||
for from_module, to_module in migration_pairs
|
||||
]
|
||||
],
|
||||
)
|
||||
pattern_name = f"langchain_migrate_{name}"
|
||||
output = f"""
|
||||
return f"""
|
||||
language python
|
||||
|
||||
// This migration is generated automatically - do not manually edit this file
|
||||
@@ -37,4 +33,3 @@ pattern {pattern_name}() {{
|
||||
// Add this for invoking directly
|
||||
{pattern_name}()
|
||||
"""
|
||||
return output
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
"""Generate migrations for partner packages."""
|
||||
|
||||
import importlib
|
||||
from typing import List, Tuple
|
||||
|
||||
from langchain_core.documents import BaseDocumentCompressor, BaseDocumentTransformer
|
||||
from langchain_core.embeddings import Embeddings
|
||||
@@ -19,7 +18,7 @@ from langchain_cli.namespaces.migrate.generate.utils import (
|
||||
# PUBLIC API
|
||||
|
||||
|
||||
def get_migrations_for_partner_package(pkg_name: str) -> List[Tuple[str, str]]:
|
||||
def get_migrations_for_partner_package(pkg_name: str) -> list[tuple[str, str]]:
|
||||
"""Generate migrations from community package to partner package.
|
||||
|
||||
This code works
|
||||
@@ -29,6 +28,7 @@ def get_migrations_for_partner_package(pkg_name: str) -> List[Tuple[str, str]]:
|
||||
|
||||
Returns:
|
||||
List of 2-tuples containing old and new import paths.
|
||||
|
||||
"""
|
||||
package = importlib.import_module(pkg_name)
|
||||
classes_ = find_subclasses_in_module(
|
||||
@@ -47,9 +47,8 @@ def get_migrations_for_partner_package(pkg_name: str) -> List[Tuple[str, str]]:
|
||||
|
||||
old_paths = community_classes + imports_for_pkg
|
||||
|
||||
migrations = [
|
||||
return [
|
||||
(f"{module}.{item}", f"{pkg_name}.{item}")
|
||||
for module, item in old_paths
|
||||
if item in classes_
|
||||
]
|
||||
return migrations
|
||||
|
||||
@@ -3,7 +3,7 @@ import inspect
|
||||
import os
|
||||
import pathlib
|
||||
from pathlib import Path
|
||||
from typing import Any, List, Optional, Tuple, Type
|
||||
from typing import Any, Optional
|
||||
|
||||
HERE = Path(__file__).parent
|
||||
# Should bring us to [root]/src
|
||||
@@ -20,7 +20,7 @@ class ImportExtractor(ast.NodeVisitor):
|
||||
self.imports: list = []
|
||||
self.package = from_package
|
||||
|
||||
def visit_ImportFrom(self, node):
|
||||
def visit_ImportFrom(self, node) -> None: # noqa: N802
|
||||
if node.module and (
|
||||
self.package is None or str(node.module).startswith(self.package)
|
||||
):
|
||||
@@ -29,7 +29,7 @@ class ImportExtractor(ast.NodeVisitor):
|
||||
self.generic_visit(node)
|
||||
|
||||
|
||||
def _get_class_names(code: str) -> List[str]:
|
||||
def _get_class_names(code: str) -> list[str]:
|
||||
"""Extract class names from a code string."""
|
||||
# Parse the content of the file into an AST
|
||||
tree = ast.parse(code)
|
||||
@@ -39,7 +39,7 @@ def _get_class_names(code: str) -> List[str]:
|
||||
|
||||
# Define a node visitor class to collect class names
|
||||
class ClassVisitor(ast.NodeVisitor):
|
||||
def visit_ClassDef(self, node):
|
||||
def visit_ClassDef(self, node) -> None: # noqa: N802
|
||||
class_names.append(node.name)
|
||||
self.generic_visit(node)
|
||||
|
||||
@@ -49,7 +49,7 @@ def _get_class_names(code: str) -> List[str]:
|
||||
return class_names
|
||||
|
||||
|
||||
def is_subclass(class_obj: Any, classes_: List[Type]) -> bool:
|
||||
def is_subclass(class_obj: Any, classes_: list[type]) -> bool:
|
||||
"""Check if the given class object is a subclass of any class in list classes."""
|
||||
return any(
|
||||
issubclass(class_obj, kls)
|
||||
@@ -58,17 +58,17 @@ def is_subclass(class_obj: Any, classes_: List[Type]) -> bool:
|
||||
)
|
||||
|
||||
|
||||
def find_subclasses_in_module(module, classes_: List[Type]) -> List[str]:
|
||||
def find_subclasses_in_module(module, classes_: list[type]) -> list[str]:
|
||||
"""Find all classes in the module that inherit from one of the classes."""
|
||||
subclasses = []
|
||||
# Iterate over all attributes of the module that are classes
|
||||
for name, obj in inspect.getmembers(module, inspect.isclass):
|
||||
for _name, obj in inspect.getmembers(module, inspect.isclass):
|
||||
if is_subclass(obj, classes_):
|
||||
subclasses.append(obj.__name__)
|
||||
return subclasses
|
||||
|
||||
|
||||
def _get_all_classnames_from_file(file: Path, pkg: str) -> List[Tuple[str, str]]:
|
||||
def _get_all_classnames_from_file(file: Path, pkg: str) -> list[tuple[str, str]]:
|
||||
"""Extract all class names from a file."""
|
||||
with open(file, encoding="utf-8") as f:
|
||||
code = f.read()
|
||||
@@ -79,8 +79,10 @@ def _get_all_classnames_from_file(file: Path, pkg: str) -> List[Tuple[str, str]]
|
||||
|
||||
|
||||
def identify_all_imports_in_file(
|
||||
file: str, *, from_package: Optional[str] = None
|
||||
) -> List[Tuple[str, str]]:
|
||||
file: str,
|
||||
*,
|
||||
from_package: Optional[str] = None,
|
||||
) -> list[tuple[str, str]]:
|
||||
"""Let's also identify all the imports in the given file."""
|
||||
with open(file, encoding="utf-8") as f:
|
||||
code = f.read()
|
||||
@@ -96,14 +98,17 @@ def identify_pkg_source(pkg_root: str) -> pathlib.Path:
|
||||
|
||||
Returns:
|
||||
Returns the path to the source code for the package.
|
||||
|
||||
"""
|
||||
dirs = [d for d in Path(pkg_root).iterdir() if d.is_dir()]
|
||||
matching_dirs = [d for d in dirs if d.name.startswith("langchain_")]
|
||||
assert len(matching_dirs) == 1, "There should be only one langchain package."
|
||||
if len(matching_dirs) != 1:
|
||||
msg = "There should be only one langchain package."
|
||||
raise ValueError(msg)
|
||||
return matching_dirs[0]
|
||||
|
||||
|
||||
def list_classes_by_package(pkg_root: str) -> List[Tuple[str, str]]:
|
||||
def list_classes_by_package(pkg_root: str) -> list[tuple[str, str]]:
|
||||
"""List all classes in a package."""
|
||||
module_classes = []
|
||||
pkg_source = identify_pkg_source(pkg_root)
|
||||
@@ -117,7 +122,7 @@ def list_classes_by_package(pkg_root: str) -> List[Tuple[str, str]]:
|
||||
return module_classes
|
||||
|
||||
|
||||
def list_init_imports_by_package(pkg_root: str) -> List[Tuple[str, str]]:
|
||||
def list_init_imports_by_package(pkg_root: str) -> list[tuple[str, str]]:
|
||||
"""List all the things that are being imported in a package by module."""
|
||||
imports = []
|
||||
pkg_source = identify_pkg_source(pkg_root)
|
||||
@@ -125,7 +130,7 @@ def list_init_imports_by_package(pkg_root: str) -> List[Tuple[str, str]]:
|
||||
files = list(Path(pkg_source).rglob("*.py"))
|
||||
|
||||
for file in files:
|
||||
if not file.name == "__init__.py":
|
||||
if file.name != "__init__.py":
|
||||
continue
|
||||
import_in_file = identify_all_imports_in_file(str(file))
|
||||
module_name = _get_current_module(file, pkg_root)
|
||||
@@ -134,8 +139,10 @@ def list_init_imports_by_package(pkg_root: str) -> List[Tuple[str, str]]:
|
||||
|
||||
|
||||
def find_imports_from_package(
|
||||
code: str, *, from_package: Optional[str] = None
|
||||
) -> List[Tuple[str, str]]:
|
||||
code: str,
|
||||
*,
|
||||
from_package: Optional[str] = None,
|
||||
) -> list[tuple[str, str]]:
|
||||
# Parse the code into an AST
|
||||
tree = ast.parse(code)
|
||||
# Create an instance of the visitor
|
||||
|
||||
@@ -4,7 +4,7 @@ from pathlib import Path
|
||||
|
||||
import rich
|
||||
import typer
|
||||
from gritql import run # type: ignore
|
||||
from gritql import run # type: ignore[import]
|
||||
from typer import Option
|
||||
|
||||
|
||||
@@ -17,13 +17,13 @@ def get_gritdir_path() -> Path:
|
||||
def migrate(
|
||||
ctx: typer.Context,
|
||||
# Using diff instead of dry-run for backwards compatibility with the old CLI
|
||||
diff: bool = Option(
|
||||
False,
|
||||
diff: bool = Option( # noqa: FBT001
|
||||
False, # noqa: FBT003
|
||||
"--diff",
|
||||
help="Show the changes that would be made without applying them.",
|
||||
),
|
||||
interactive: bool = Option(
|
||||
False,
|
||||
interactive: bool = Option( # noqa: FBT001
|
||||
False, # noqa: FBT003
|
||||
"--interactive",
|
||||
help="Prompt for confirmation before making each change",
|
||||
),
|
||||
@@ -54,7 +54,7 @@ def migrate(
|
||||
'⚠️ This script is a "best-effort", and is likely to make some '
|
||||
"mistakes.\n\n"
|
||||
"🛡️ Backup your code prior to running the migration script -- it will "
|
||||
"modify your files!\n\n"
|
||||
"modify your files!\n\n",
|
||||
)
|
||||
rich.print("-" * 10)
|
||||
rich.print()
|
||||
|
||||
@@ -1,15 +1,12 @@
"""
Develop installable templates.
"""
"""Develop installable templates."""

import re
import shutil
import subprocess
from pathlib import Path
from typing import Optional
from typing import Annotated, Optional

import typer
from typing_extensions import Annotated

from langchain_cli.utils.packages import get_langserve_export, get_package_root

@@ -19,14 +16,12 @@ package_cli = typer.Typer(no_args_is_help=True, add_completion=False)
@package_cli.command()
def new(
    name: Annotated[str, typer.Argument(help="The name of the folder to create")],
    with_poetry: Annotated[
    with_poetry: Annotated[  # noqa: FBT002
        bool,
        typer.Option("--with-poetry/--no-poetry", help="Don't run poetry install"),
    ] = False,
):
    """
    Creates a new template package.
    """
) -> None:
    """Create a new template package."""
    computed_name = name if name != "." else Path.cwd().name
    destination_dir = Path.cwd() / name if name != "." else Path.cwd()

@@ -58,8 +53,9 @@ def new(
    pyproject_contents = pyproject.read_text()
    pyproject.write_text(
        pyproject_contents.replace("__package_name__", package_name).replace(
            "__module_name__", module_name
        )
            "__module_name__",
            module_name,
        ),
    )

    # move module folder
@@ -76,23 +72,26 @@ def new(
    readme_contents = readme.read_text()
    readme.write_text(
        readme_contents.replace("__package_name__", package_name).replace(
            "__app_route_code__", app_route_code
        )
            "__app_route_code__",
            app_route_code,
        ),
    )

    # poetry install
    if with_poetry:
        subprocess.run(["poetry", "install"], cwd=destination_dir)
        subprocess.run(["poetry", "install"], cwd=destination_dir)  # noqa: S607


@package_cli.command()
def serve(
    *,
    port: Annotated[
        Optional[int], typer.Option(help="The port to run the server on")
        Optional[int],
        typer.Option(help="The port to run the server on"),
    ] = None,
    host: Annotated[
        Optional[str], typer.Option(help="The host to run the server on")
        Optional[str],
        typer.Option(help="The host to run the server on"),
    ] = None,
    configurable: Annotated[
        Optional[bool],
@@ -109,9 +108,7 @@ def serve(
        ),
    ] = False,
) -> None:
    """
    Starts a demo app for this template.
    """
    """Start a demo app for this template."""
    # load pyproject.toml
    project_dir = get_package_root()
    pyproject = project_dir / "pyproject.toml"
@@ -143,10 +140,8 @@ def serve(


@package_cli.command()
def list(contains: Annotated[Optional[str], typer.Argument()] = None) -> None:
    """
    List all or search for available templates.
    """
def list(contains: Annotated[Optional[str], typer.Argument()] = None) -> None:  # noqa: A001
    """List all or search for available templates."""
    from langchain_cli.utils.github import list_packages

    packages = list_packages(contains=contains)
@@ -8,7 +8,7 @@ WORKDIR /code

COPY ./pyproject.toml ./README.md ./poetry.lock* ./

COPY ./package[s] ./packages
COPY ./packages ./packages

RUN poetry install --no-interaction --no-ansi --no-root

@@ -1,16 +1,16 @@
import http.client
import json
from typing import Any, Dict, List, Optional, TypedDict
from typing import Any, Optional, TypedDict

WRITE_KEY = "310apTK0HUFl4AOv"


class EventDict(TypedDict):
    event: str
    properties: Optional[Dict[str, Any]]
    properties: Optional[dict[str, Any]]


def create_events(events: List[EventDict]) -> Optional[Any]:
def create_events(events: list[EventDict]) -> Optional[Any]:
    try:
        data = {
            "events": [
@@ -20,7 +20,7 @@ def create_events(events: List[EventDict]) -> Optional[Any]:
                    "properties": event.get("properties"),
                }
                for event in events
            ]
            ],
        }

        conn = http.client.HTTPSConnection("app.firstpartyhq.com")
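For reference, a minimal sketch of calling this telemetry helper. The payload mirrors the unit test later in this diff; the return value is whatever JSON the endpoint responds with, or None on failure:

from langchain_cli.utils.events import EventDict, create_events

# Payload mirrors the unit test below; create_events returns the parsed
# JSON response on success, or None if the request fails.
result = create_events([EventDict(event="Test Event", properties={"test": "test"})])
print(result)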
@@ -1,8 +1,7 @@
from pathlib import Path
from typing import Dict


def find_and_replace(source: str, replacements: Dict[str, str]) -> str:
def find_and_replace(source: str, replacements: dict[str, str]) -> str:
    rtn = source

    # replace keys in deterministic alphabetical order
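A small sketch of the deterministic-ordering idea behind that comment. This is an illustration consistent with the comment, not the library's exact body, and the function name here is hypothetical:

# Iterating keys in sorted order makes the output independent of dict
# insertion order when several replacement keys are present.
def find_and_replace_sketch(source: str, replacements: dict[str, str]) -> str:
    rtn = source
    for key in sorted(replacements):
        rtn = rtn.replace(key, replacements[key])
    return rtn

print(find_and_replace_sketch("__package_name__", {"__package_name__": "my-app"}))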
@@ -1,8 +1,9 @@
import hashlib
import re
import shutil
from collections.abc import Sequence
from pathlib import Path
from typing import Dict, List, Optional, Sequence, TypedDict
from typing import Optional, TypedDict

from git import Repo

@@ -18,7 +19,7 @@ class DependencySource(TypedDict):
    ref: Optional[str]
    subdirectory: Optional[str]
    api_path: Optional[str]
    event_metadata: Dict
    event_metadata: dict


# use poetry dependency string format
@@ -30,10 +31,11 @@ def parse_dependency_string(
) -> DependencySource:
    if dep is not None and dep.startswith("git+"):
        if repo is not None or branch is not None:
            raise ValueError(
            msg = (
                "If a dependency starts with git+, you cannot manually specify "
                "a repo or branch."
            )
            raise ValueError(msg)
        # remove git+
        gitstring = dep[4:]
        subdirectory = None
@@ -42,9 +44,8 @@ def parse_dependency_string(
        if "#subdirectory=" in gitstring:
            gitstring, subdirectory = gitstring.split("#subdirectory=")
            if "#" in subdirectory or "@" in subdirectory:
                raise ValueError(
                    "#subdirectory must be the last part of the dependency string"
                )
                msg = "#subdirectory must be the last part of the dependency string"
                raise ValueError(msg)

        # find first slash after ://
        # find @ or # after that slash
@@ -53,9 +54,8 @@ def parse_dependency_string(

        # find first slash after ://
        if "://" not in gitstring:
            raise ValueError(
                "git+ dependencies must start with git+https:// or git+ssh://"
            )
            msg = "git+ dependencies must start with git+https:// or git+ssh://"
            raise ValueError(msg)

        _, find_slash = gitstring.split("://", 1)

@@ -65,7 +65,7 @@ def parse_dependency_string(
        else:
            _, post_slash = find_slash.split("/", 1)
            if "@" in post_slash or "#" in post_slash:
                _, ref = re.split(r"[@#]", post_slash, 1)
                _, ref = re.split(r"[@#]", post_slash, maxsplit=1)

        # gitstring is everything before that
        gitstring = gitstring[: -len(ref) - 1] if ref is not None else gitstring
@@ -78,51 +78,53 @@ def parse_dependency_string(
            event_metadata={"dependency_string": dep},
        )

    elif dep is not None and dep.startswith("https://"):
        raise ValueError("Only git dependencies are supported")
    else:
        # if repo is none, use default, including subdirectory
        base_subdir = Path(DEFAULT_GIT_SUBDIRECTORY) if repo is None else Path()
        subdir = str(base_subdir / dep) if dep is not None else None
        gitstring = (
            DEFAULT_GIT_REPO
            if repo is None
            else f"https://github.com/{repo.strip('/')}.git"
        )
        ref = DEFAULT_GIT_REF if branch is None else branch
        # it's a default git repo dependency
        return DependencySource(
            git=gitstring,
            ref=ref,
            subdirectory=subdir,
            api_path=api_path,
            event_metadata={
                "dependency_string": dep,
                "used_repo_flag": repo is not None,
                "used_branch_flag": branch is not None,
            },
        )
    if dep is not None and dep.startswith("https://"):
        msg = "Only git dependencies are supported"
        raise ValueError(msg)
    # if repo is none, use default, including subdirectory
    base_subdir = Path(DEFAULT_GIT_SUBDIRECTORY) if repo is None else Path()
    subdir = str(base_subdir / dep) if dep is not None else None
    gitstring = (
        DEFAULT_GIT_REPO
        if repo is None
        else f"https://github.com/{repo.strip('/')}.git"
    )
    ref = DEFAULT_GIT_REF if branch is None else branch
    # it's a default git repo dependency
    return DependencySource(
        git=gitstring,
        ref=ref,
        subdirectory=subdir,
        api_path=api_path,
        event_metadata={
            "dependency_string": dep,
            "used_repo_flag": repo is not None,
            "used_branch_flag": branch is not None,
        },
    )


def _list_arg_to_length(arg: Optional[List[str]], num: int) -> Sequence[Optional[str]]:
def _list_arg_to_length(arg: Optional[list[str]], num: int) -> Sequence[Optional[str]]:
    if not arg:
        return [None] * num
    elif len(arg) == 1:
    if len(arg) == 1:
        return arg * num
    elif len(arg) == num:
    if len(arg) == num:
        return arg
    else:
        raise ValueError(f"Argument must be of length 1 or {num}")
    msg = f"Argument must be of length 1 or {num}"
    raise ValueError(msg)


def parse_dependencies(
    dependencies: Optional[List[str]],
    repo: List[str],
    branch: List[str],
    api_path: List[str],
) -> List[DependencySource]:
    dependencies: Optional[list[str]],
    repo: list[str],
    branch: list[str],
    api_path: list[str],
) -> list[DependencySource]:
    num_deps = max(
        len(dependencies) if dependencies is not None else 0, len(repo), len(branch)
        len(dependencies) if dependencies is not None else 0,
        len(repo),
        len(branch),
    )
    if (
        (dependencies and len(dependencies) != num_deps)
@@ -130,10 +132,11 @@ def parse_dependencies(
        or (repo and len(repo) not in [1, num_deps])
        or (branch and len(branch) not in [1, num_deps])
    ):
        raise ValueError(
        msg = (
            "Number of defined repos/branches/api_paths did not match the "
            "number of templates."
        )
        raise ValueError(msg)
    inner_deps = _list_arg_to_length(dependencies, num_deps)
    inner_api_paths = _list_arg_to_length(api_path, num_deps)
    inner_repos = _list_arg_to_length(repo, num_deps)
@@ -142,7 +145,10 @@ def parse_dependencies(
    return [
        parse_dependency_string(iter_dep, iter_repo, iter_branch, iter_api_path)
        for iter_dep, iter_repo, iter_branch, iter_api_path in zip(
            inner_deps, inner_repos, inner_branches, inner_api_paths
            inner_deps,
            inner_repos,
            inner_branches,
            inner_api_paths,
        )
    ]

@@ -150,10 +156,10 @@ def parse_dependencies(
def _get_repo_path(gitstring: str, ref: Optional[str], repo_dir: Path) -> Path:
    # only based on git for now
    ref_str = ref if ref is not None else ""
    hashed = hashlib.sha256((f"{gitstring}:{ref_str}").encode("utf-8")).hexdigest()[:8]
    hashed = hashlib.sha256((f"{gitstring}:{ref_str}").encode()).hexdigest()[:8]

    removed_protocol = gitstring.split("://")[-1]
    removed_basename = re.split(r"[/:]", removed_protocol, 1)[-1]
    removed_basename = re.split(r"[/:]", removed_protocol, maxsplit=1)[-1]
    removed_extras = removed_basename.split("#")[0]
    foldername = re.sub(r"\W", "_", removed_extras)

@@ -169,7 +175,7 @@ def update_repo(gitstring: str, ref: Optional[str], repo_dir: Path) -> Path:
    try:
        repo = Repo(repo_path)
        if repo.active_branch.name != ref:
            raise ValueError()
            raise ValueError
        repo.remotes.origin.pull()
    except Exception:
        # if it fails, delete and clone again
@@ -185,8 +191,7 @@ def copy_repo(
    source: Path,
    destination: Path,
) -> None:
    """
    Copies a repo, ignoring git folders.
    """Copy a repo, ignoring git folders.

    Raises FileNotFound error if it can't find source
    """
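To make the dependency-string rules above concrete, here is a short sketch of how parse_dependency_string behaves on a git+ string, with the expected fields taken from the unit tests later in this diff (the import path is the module shown in this hunk):

from langchain_cli.utils.git import parse_dependency_string

# A git+ string carries the repo URL, an optional @ref or #ref, and an
# optional trailing #subdirectory= component (which must come last).
dep = parse_dependency_string(
    "git+ssh://git@github.com/efriis/myrepo.git#develop",
    None,
    None,
    None,
)
assert dep["git"] == "ssh://git@github.com/efriis/myrepo.git"
assert dep["ref"] == "develop"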
@@ -3,27 +3,30 @@ import json
from typing import Optional


def list_packages(*, contains: Optional[str] = None):
def list_packages(*, contains: Optional[str] = None) -> list[str]:
    conn = http.client.HTTPSConnection("api.github.com")
    try:
        headers = {
            "Accept": "application/vnd.github+json",
            "X-GitHub-Api-Version": "2022-11-28",
            "User-Agent": "langchain-cli",
        }

    headers = {
        "Accept": "application/vnd.github+json",
        "X-GitHub-Api-Version": "2022-11-28",
        "User-Agent": "langchain-cli",
    }
        conn.request(
            "GET",
            "/repos/langchain-ai/langchain/contents/templates",
            headers=headers,
        )
        res = conn.getresponse()

    conn.request(
        "GET", "/repos/langchain-ai/langchain/contents/templates", headers=headers
    )
    res = conn.getresponse()
        res_str = res.read()

    res_str = res.read()

    data = json.loads(res_str)
    package_names = [
        p["name"] for p in data if p["type"] == "dir" and p["name"] != "docs"
    ]
    package_names_filtered = (
        [p for p in package_names if contains in p] if contains else package_names
    )
    return package_names_filtered
        data = json.loads(res_str)
        package_names = [
            p["name"] for p in data if p["type"] == "dir" and p["name"] != "docs"
        ]
        return (
            [p for p in package_names if contains in p] if contains else package_names
        )
    finally:
        conn.close()
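For reference, a minimal usage sketch of the helper above; the import matches the one used by the template list command earlier in this diff, and the "rag" filter value is illustrative:

from langchain_cli.utils.github import list_packages

# Lists template directories from the langchain repo's templates/ folder,
# optionally filtered by substring.
print(list_packages(contains="rag"))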
@@ -1,5 +1,5 @@
from pathlib import Path
from typing import Any, Dict, Optional, Set, TypedDict
from typing import Any, Optional, TypedDict

from tomlkit import load

@@ -7,7 +7,7 @@ from tomlkit import load
def get_package_root(cwd: Optional[Path] = None) -> Path:
    # traverse path for routes to host (any directory holding a pyproject.toml file)
    package_root = Path.cwd() if cwd is None else cwd
    visited: Set[Path] = set()
    visited: set[Path] = set()
    while package_root not in visited:
        visited.add(package_root)

@@ -15,17 +15,18 @@ def get_package_root(cwd: Optional[Path] = None) -> Path:
        if pyproject_path.exists():
            return package_root
        package_root = package_root.parent
    raise FileNotFoundError("No pyproject.toml found")
    msg = "No pyproject.toml found"
    raise FileNotFoundError(msg)


class LangServeExport(TypedDict):
    """
    Fields from pyproject.toml that are relevant to LangServe
    """Fields from pyproject.toml that are relevant to LangServe.

    Attributes:
        module: The module to import from, tool.langserve.export_module
        attr: The attribute to import from the module, tool.langserve.export_attr
        package_name: The name of the package, tool.poetry.name

    """

    module: str
@@ -35,11 +36,12 @@ class LangServeExport(TypedDict):

def get_langserve_export(filepath: Path) -> LangServeExport:
    with open(filepath) as f:
        data: Dict[str, Any] = load(f)
        data: dict[str, Any] = load(f)
    try:
        module = data["tool"]["langserve"]["export_module"]
        attr = data["tool"]["langserve"]["export_attr"]
        package_name = data["tool"]["poetry"]["name"]
    except KeyError as e:
        raise KeyError("Invalid LangServe PyProject.toml") from e
        msg = "Invalid LangServe PyProject.toml"
        raise KeyError(msg) from e
    return LangServeExport(module=module, attr=attr, package_name=package_name)
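Putting the two helpers together, a short usage sketch consistent with how the serve command earlier in this diff loads its config:

from langchain_cli.utils.packages import get_langserve_export, get_package_root

# Walks up from the CWD to the nearest pyproject.toml, then reads the
# tool.langserve.export_module / export_attr and tool.poetry.name fields.
root = get_package_root()
export = get_langserve_export(root / "pyproject.toml")
print(export["module"], export["attr"], export["package_name"])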
@@ -1,5 +1,7 @@
import contextlib
from collections.abc import Iterable
from pathlib import Path
from typing import Any, Dict, Iterable, Tuple
from typing import Any

from tomlkit import dump, inline_table, load
from tomlkit.items import InlineTable
@@ -12,34 +14,34 @@ def _get_dep_inline_table(path: Path) -> InlineTable:


def add_dependencies_to_pyproject_toml(
    pyproject_toml: Path, local_editable_dependencies: Iterable[Tuple[str, Path]]
    pyproject_toml: Path,
    local_editable_dependencies: Iterable[tuple[str, Path]],
) -> None:
    """Add dependencies to pyproject.toml."""
    with open(pyproject_toml, encoding="utf-8") as f:
        # tomlkit types aren't amazing - treat as Dict instead
        pyproject: Dict[str, Any] = load(f)
        pyproject: dict[str, Any] = load(f)
    pyproject["tool"]["poetry"]["dependencies"].update(
        {
            name: _get_dep_inline_table(loc.relative_to(pyproject_toml.parent))
            for name, loc in local_editable_dependencies
        }
        },
    )
    with open(pyproject_toml, "w", encoding="utf-8") as f:
        dump(pyproject, f)


def remove_dependencies_from_pyproject_toml(
    pyproject_toml: Path, local_editable_dependencies: Iterable[str]
    pyproject_toml: Path,
    local_editable_dependencies: Iterable[str],
) -> None:
    """Remove dependencies from pyproject.toml."""
    with open(pyproject_toml, encoding="utf-8") as f:
        pyproject: Dict[str, Any] = load(f)
        pyproject: dict[str, Any] = load(f)
    # tomlkit types aren't amazing - treat as Dict instead
    dependencies = pyproject["tool"]["poetry"]["dependencies"]
    for name in local_editable_dependencies:
        try:
        with contextlib.suppress(KeyError):
            del dependencies[name]
        except KeyError:
            pass
    with open(pyproject_toml, "w", encoding="utf-8") as f:
        dump(pyproject, f)
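A hedged usage sketch of these two helpers, assuming they live in langchain_cli.utils.pyproject (the module path is inferred, and the dependency name and local path below are hypothetical):

from pathlib import Path

from langchain_cli.utils.pyproject import (  # assumed module path
    add_dependencies_to_pyproject_toml,
    remove_dependencies_from_pyproject_toml,
)

# Hypothetical local template checkout registered as an editable dependency
# (written as an inline table pointing at the local path), then removed.
pyproject = Path("pyproject.toml")
add_dependencies_to_pyproject_toml(pyproject, [("my-template", Path("packages/my-template"))])
remove_dependencies_from_pyproject_toml(pyproject, ["my-template"])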
@@ -30,7 +30,7 @@ langchain-cli = "langchain_cli.cli:app"

[dependency-groups]
dev = ["pytest<8.0.0,>=7.4.2", "pytest-watcher<1.0.0,>=0.3.4"]
lint = ["ruff<1.0,>=0.5", "mypy<2.0.0,>=1.13.0"]
lint = ["ruff<0.13,>=0.12.2", "mypy<2.0.0,>=1.13.0"]
test = ["langchain-core", "langchain"]
typing = ["langchain"]
test_integration = []
@@ -39,13 +39,67 @@ test_integration = []
langchain-core = { path = "../core", editable = true }
langchain = { path = "../langchain", editable = true }

[tool.ruff]
target-version = "py39"
exclude = [
    "langchain_cli/integration_template",
    "langchain_cli/package_template",
    "langchain_cli/project_template",
]

[tool.ruff.lint]
select = [
    "E",  # pycodestyle
    "F",  # pyflakes
    "I",  # isort
    "T201",  # print
    "A",  # flake8-builtins
    "B",  # flake8-bugbear
    "ARG",  # flake8-unused-arguments
    "ASYNC",  # flake8-async
    "C4",  # flake8-comprehensions
    "COM",  # flake8-commas
    "D",  # pydocstyle
    "DOC",  # pydoclint
    "E",  # pycodestyle error
    "EM",  # flake8-errmsg
    "F",  # pyflakes
    "FA",  # flake8-future-annotations
    "FBT",  # flake8-boolean-trap
    "FLY",  # flake8-flynt
    "I",  # isort
    "ICN",  # flake8-import-conventions
    "INT",  # flake8-gettext
    "ISC",  # isort-comprehensions
    "N",  # pep8-naming
    "PT",  # flake8-pytest-style
    "PGH",  # pygrep-hooks
    "PIE",  # flake8-pie
    "PERF",  # flake8-perf
    "PYI",  # flake8-pyi
    "Q",  # flake8-quotes
    "RET",  # flake8-return
    "RSE",  # flake8-rst-docstrings
    "RUF",  # ruff
    "S",  # flake8-bandit
    "SLF",  # flake8-self
    "SLOT",  # flake8-slots
    "SIM",  # flake8-simplify
    "T10",  # flake8-debugger
    "T20",  # flake8-print
    "TID",  # flake8-tidy-imports
    "UP",  # pyupgrade
    "W",  # pycodestyle warning
    "YTT",  # flake8-2020
]
ignore = [
    "D100",  # pydocstyle: Missing docstring in public module
    "D101",  # pydocstyle: Missing docstring in public class
    "D102",  # pydocstyle: Missing docstring in public method
    "D103",  # pydocstyle: Missing docstring in public function
    "D104",  # pydocstyle: Missing docstring in public package
    "D105",  # pydocstyle: Missing docstring in magic method
    "D107",  # pydocstyle: Missing docstring in __init__
    "D407",  # pydocstyle: Missing-dashed-underline-after-section
    "COM812",  # Messes with the formatter
]
pyupgrade.keep-runtime-typing = true

[tool.mypy]
exclude = [
@@ -1,9 +1,9 @@
# type: ignore
"""Script to generate migrations for the migration script."""

import json
import os
import pkgutil
from typing import Optional

import click

@@ -19,9 +19,8 @@ from langchain_cli.namespaces.migrate.generate.partner import (


@click.group()
def cli():
def cli() -> None:
    """Migration scripts management."""
    pass


@cli.command()
@@ -45,12 +44,17 @@ def cli():
)
@click.option(
    "--format",
    "format_",
    type=click.Choice(["json", "grit"], case_sensitive=False),
    default="json",
    help="The output format for the migration script (json or grit).",
)
def generic(
    pkg1: str, pkg2: str, output: str, filter_by_all: bool, format: str
    pkg1: str,
    pkg2: str,
    output: str,
    filter_by_all: bool,  # noqa: FBT001
    format_: str,
) -> None:
    """Generate a migration script."""
    click.echo("Migration script generated.")
@@ -62,9 +66,9 @@ def generic(
    name = f"{pkg1}_to_{pkg2}"

    if output is None:
        output = f"{name}.json" if format == "json" else f"{name}.grit"
        output = f"{name}.json" if format_ == "json" else f"{name}.grit"

    if format == "json":
    if format_ == "json":
        dumped = json.dumps(migrations, indent=2, sort_keys=True)
    else:
        dumped = dump_migrations_as_grit(name, migrations)
@@ -73,7 +77,7 @@ def generic(
        f.write(dumped)


def handle_partner(pkg: str, output: str = None):
def handle_partner(pkg: str, output: Optional[str] = None) -> None:
    migrations = get_migrations_for_partner_package(pkg)
    # Run with python 3.9+
    name = pkg.removeprefix("langchain_")
@@ -100,7 +104,7 @@ def partner(pkg: str, output: str) -> None:
@click.argument("json_file")
def json_to_grit(json_file: str) -> None:
    """Generate a Grit migration from an old JSON migration file."""
    with open(json_file, "r") as f:
    with open(json_file) as f:
        migrations = json.load(f)
    name = os.path.basename(json_file).removesuffix(".json").removesuffix(".grit")
    data = dump_migrations_as_grit(name, migrations)
@@ -4,4 +4,3 @@ import pytest
@pytest.mark.compile
def test_placeholder() -> None:
    """Used for compiling integration tests without running any real tests."""
    pass
@@ -6,7 +6,7 @@ class File:
        self.name = name
        self.content = "\n".join(content or [])

    def __eq__(self, __value: object) -> bool:
    def __eq__(self, __value: object, /) -> bool:
        if not isinstance(__value, File):
            return NotImplemented


@@ -34,12 +34,12 @@ class Folder:
                files.append(cls.from_structure(path))
            else:
                files.append(
                    File(path.name, path.read_text(encoding="utf-8").splitlines())
                    File(path.name, path.read_text(encoding="utf-8").splitlines()),
                )

        return Folder(name, *files)

    def __eq__(self, __value: object) -> bool:
    def __eq__(self, __value: object, /) -> bool:
        if isinstance(__value, File):
            return False
@@ -1,19 +1,17 @@
# ruff: noqa: E402
from __future__ import annotations

import pytest

pytest.importorskip("gritql")

import difflib
from pathlib import Path

import pytest
from typer.testing import CliRunner

from langchain_cli.cli import app
from tests.unit_tests.migrate.cli_runner.cases import before, expected
from tests.unit_tests.migrate.cli_runner.folder import Folder

pytest.importorskip("gritql")


def find_issue(current: Folder, expected: Folder) -> str:
    for current_file, expected_file in zip(current.files, expected.files):
@@ -25,7 +23,7 @@ def find_issue(current: Folder, expected: Folder) -> str:
            )
        if isinstance(current_file, Folder) and isinstance(expected_file, Folder):
            return find_issue(current_file, expected_file)
        elif isinstance(current_file, Folder) or isinstance(expected_file, Folder):
        if isinstance(current_file, Folder) or isinstance(expected_file, Folder):
            return (
                f"One of the files is a "
                f"folder: {current_file.name} != {expected_file.name}"
@@ -36,7 +34,7 @@ def find_issue(current: Folder, expected: Folder) -> str:
                    expected_file.content.splitlines(),
                    fromfile=current_file.name,
                    tofile=expected_file.name,
                )
                ),
            )
    return "Unknown"

@@ -49,8 +47,10 @@ def test_command_line(tmp_path: Path) -> None:
        before.create_structure(root=Path(td))
        # The input is used to force through the confirmation.
        result = runner.invoke(app, ["migrate", before.name, "--force"])
        assert result.exit_code == 0, result.output
        if result.exit_code != 0:
            raise RuntimeError(result.output)

        after = Folder.from_structure(Path(td) / before.name)

    assert after == expected, find_issue(after, expected)
    if after != expected:
        raise ValueError(find_issue(after, expected))
@@ -10,49 +10,54 @@ from langchain_cli.namespaces.migrate.generate.generic import (
@pytest.mark.xfail(reason="Unknown reason")
def test_create_json_agent_migration() -> None:
    """Test the migration of create_json_agent from langchain to langchain_community."""
    with sup1():
        with sup2():
            raw_migrations = generate_simplified_migrations(
                from_package="langchain", to_package="langchain_community"
            )
            json_agent_migrations = [
                migration
                for migration in raw_migrations
                if "create_json_agent" in migration[0]
            ]
            assert json_agent_migrations == [
                (
                    "langchain.agents.create_json_agent",
                    "langchain_community.agent_toolkits.create_json_agent",
                ),
                (
                    "langchain.agents.agent_toolkits.create_json_agent",
                    "langchain_community.agent_toolkits.create_json_agent",
                ),
                (
                    "langchain.agents.agent_toolkits.json.base.create_json_agent",
                    "langchain_community.agent_toolkits.create_json_agent",
                ),
            ]
    with sup1(), sup2():
        raw_migrations = generate_simplified_migrations(
            from_package="langchain",
            to_package="langchain_community",
        )
        json_agent_migrations = [
            migration
            for migration in raw_migrations
            if "create_json_agent" in migration[0]
        ]
        if json_agent_migrations != [
            (
                "langchain.agents.create_json_agent",
                "langchain_community.agent_toolkits.create_json_agent",
            ),
            (
                "langchain.agents.agent_toolkits.create_json_agent",
                "langchain_community.agent_toolkits.create_json_agent",
            ),
            (
                "langchain.agents.agent_toolkits.json.base.create_json_agent",
                "langchain_community.agent_toolkits.create_json_agent",
            ),
        ]:
            msg = "json_agent_migrations did not match the expected value"
            raise ValueError(msg)


@pytest.mark.xfail(reason="Unknown reason")
def test_create_single_store_retriever_db() -> None:
    """Test migration from langchain to langchain_core"""
    with sup1():
        with sup2():
            raw_migrations = generate_simplified_migrations(
                from_package="langchain", to_package="langchain_core"
    """Test migration from langchain to langchain_core."""
    with sup1(), sup2():
        raw_migrations = generate_simplified_migrations(
            from_package="langchain",
            to_package="langchain_core",
        )
        # SingleStore was an old name for VectorStoreRetriever
        single_store_migration = [
            migration for migration in raw_migrations if "SingleStore" in migration[0]
        ]
        if single_store_migration != [
            (
                "langchain.vectorstores.singlestoredb.SingleStoreDBRetriever",
                "langchain_core.vectorstores.VectorStoreRetriever",
            ),
        ]:
            msg = (
                "Unexpected migration: single_store_migration does not match expected "
                "value"
            )
            # SingleStore was an old name for VectorStoreRetriever
            single_store_migration = [
                migration
                for migration in raw_migrations
                if "SingleStore" in migration[0]
            ]
            assert single_store_migration == [
                (
                    "langchain.vectorstores.singlestoredb.SingleStoreDBRetriever",
                    "langchain_core.vectorstores.VectorStoreRetriever",
                ),
            ]
            raise ValueError(msg)
@@ -9,7 +9,7 @@ pytest.importorskip(modname="langchain_openai")

def test_generate_migrations() -> None:
    migrations = get_migrations_for_partner_package("langchain_openai")
    assert migrations == [
    if migrations != [
        ("langchain_community.llms.openai.OpenAI", "langchain_openai.OpenAI"),
        ("langchain_community.llms.openai.AzureOpenAI", "langchain_openai.AzureOpenAI"),
        (
@@ -43,4 +43,6 @@ def test_generate_migrations() -> None:
            "langchain_openai.AzureChatOpenAI",
        ),
        ("langchain_community.chat_models.ChatOpenAI", "langchain_openai.ChatOpenAI"),
    ]
    ]:
        msg = "Migrations do not match expected result"
        raise ValueError(msg)
@@ -2,4 +2,6 @@ from langchain_cli.namespaces.migrate.generate.utils import PKGS_ROOT


def test_root() -> None:
    assert PKGS_ROOT.name == "libs"
    if PKGS_ROOT.name != "libs":
        msg = "Expected PKGS_ROOT.name to be 'libs'."
        raise ValueError(msg)
@@ -5,6 +5,7 @@ from langchain_cli.utils.events import EventDict, create_events

@pytest.mark.xfail(reason="Unknown reason")
def test_create_events() -> None:
    assert create_events(
        [EventDict(event="Test Event", properties={"test": "test"})]
    ) == {"status": "success"}
    result = create_events([EventDict(event="Test Event", properties={"test": "test"})])
    if result != {"status": "success"}:
        msg = "Expected {'status': 'success'}, got " + repr(result)
        raise ValueError(msg)
@@ -1,4 +1,4 @@
from typing import Dict, Optional
from typing import Optional

import pytest

@@ -16,19 +16,39 @@ def _assert_dependency_equals(
    git: Optional[str] = None,
    ref: Optional[str] = None,
    subdirectory: Optional[str] = None,
    event_metadata: Optional[Dict] = None,
    event_metadata: Optional[dict] = None,
) -> None:
    assert dep["git"] == git
    assert dep["ref"] == ref
    assert dep["subdirectory"] == subdirectory
    if event_metadata is not None:
        assert dep["event_metadata"] == event_metadata
    if dep["git"] != git:
        msg = f"Expected git to be {git} but got {dep['git']}"
        raise ValueError(msg)
    if dep["ref"] != ref:
        msg = f"Expected ref to be {ref} but got {dep['ref']}"
        raise ValueError(msg)
    if dep["subdirectory"] != subdirectory:
        msg = (
            f"Expected subdirectory to be {subdirectory} but got {dep['subdirectory']}"
        )
        raise ValueError(msg)
    if event_metadata is not None and dep["event_metadata"] != event_metadata:
        msg = (
            f"Expected event_metadata to be {event_metadata} "
            f"but got {dep['event_metadata']}"
        )
        raise ValueError(msg)


def test_dependency_string() -> None:
    _assert_dependency_equals(
        parse_dependency_string(
            "git+ssh://git@github.com/efriis/myrepo.git", None, None, None
            "git+ssh://git@github.com/efriis/myrepo.git",
            None,
            None,
            None,
        ),
        git="ssh://git@github.com/efriis/myrepo.git",
        ref=None,
@@ -49,7 +69,10 @@ def test_dependency_string() -> None:

    _assert_dependency_equals(
        parse_dependency_string(
            "git+ssh://git@github.com:efriis/myrepo.git#develop", None, None, None
            "git+ssh://git@github.com:efriis/myrepo.git#develop",
            None,
            None,
            None,
        ),
        git="ssh://git@github.com:efriis/myrepo.git",
        ref="develop",
@@ -59,7 +82,10 @@ def test_dependency_string() -> None:
    # also support a slash in ssh
    _assert_dependency_equals(
        parse_dependency_string(
            "git+ssh://git@github.com/efriis/myrepo.git#develop", None, None, None
            "git+ssh://git@github.com/efriis/myrepo.git#develop",
            None,
            None,
            None,
        ),
        git="ssh://git@github.com/efriis/myrepo.git",
        ref="develop",
@@ -69,7 +95,10 @@ def test_dependency_string() -> None:
    # looks like poetry supports both an @ and a #
    _assert_dependency_equals(
        parse_dependency_string(
            "git+ssh://git@github.com:efriis/myrepo.git@develop", None, None, None
            "git+ssh://git@github.com:efriis/myrepo.git@develop",
            None,
            None,
            None,
        ),
        git="ssh://git@github.com:efriis/myrepo.git",
        ref="develop",
@@ -100,7 +129,8 @@ def test_dependency_string_both() -> None:

def test_dependency_string_invalids() -> None:
    # expect error for wrong order
    with pytest.raises(ValueError):
    # Bypassing validation since the ValueError message is dynamic
    with pytest.raises(ValueError):  # noqa: PT011
        parse_dependency_string(
            "git+https://github.com/efriis/myrepo.git#subdirectory=src@branch",
            None,
libs/cli/uv.lock (generated, 72 changes)
@@ -327,6 +327,7 @@ source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/de/f6/9d485d595a049feabc5bd85c37713b2c1509aa61448fb737f791081624f3/gritql-0.2.0.tar.gz", hash = "sha256:09e26e3d3152d3ec2e4fa80c0af4f2fe1436c82a2c6343cec6ab74ae61474bae", size = 3780 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/22/45/f3bfad1cf462d85a9ba3be5cb8b7fd5e6bf4e67f7a24cf00ae53ee6da86e/gritql-0.2.0-py2.py3-none-any.whl", hash = "sha256:6a37f4a6388c09801c25de8753546ca11d4b8a3ad527742821eb032ad069cd13", size = 5162 },
    { url = "https://files.pythonhosted.org/packages/0d/19/852daaa9273667e5d9f8bbe007b1d26ad1f465a72f6451f7f3bb4e86eca8/gritql-0.2.0-py3-none-any.whl", hash = "sha256:de95dd6d027184b0c388f1b8864c0c71fa889154c55d360f427adfd00b28ac9c", size = 4827 },
]

[[package]]
@@ -407,7 +408,7 @@ wheels = [

[[package]]
name = "langchain"
version = "0.3.24"
version = "0.3.26"
source = { editable = "../langchain" }
dependencies = [
    { name = "async-timeout", marker = "python_full_version < '3.11'" },
@@ -442,7 +443,7 @@ requires-dist = [
    { name = "langchain-text-splitters", editable = "../text-splitters" },
    { name = "langchain-together", marker = "extra == 'together'" },
    { name = "langchain-xai", marker = "extra == 'xai'" },
    { name = "langsmith", specifier = ">=0.1.17,<0.4" },
    { name = "langsmith", specifier = ">=0.1.17" },
    { name = "pydantic", specifier = ">=2.7.4,<3.0.0" },
    { name = "pyyaml", specifier = ">=5.3" },
    { name = "requests", specifier = ">=2,<3" },
@@ -497,15 +498,15 @@ test-integration = [
    { name = "langchain-core", editable = "../core" },
    { name = "langchain-text-splitters", editable = "../text-splitters" },
    { name = "langchainhub", specifier = ">=0.1.16,<1.0.0" },
    { name = "pytest-vcr", specifier = ">=1.0.2,<2.0.0" },
    { name = "python-dotenv", specifier = ">=1.0.0,<2.0.0" },
    { name = "urllib3", marker = "python_full_version < '3.10'", specifier = "<2" },
    { name = "vcrpy", specifier = ">=7.0" },
    { name = "wrapt", specifier = ">=1.15.0,<2.0.0" },
]
typing = [
    { name = "langchain-core", editable = "../core" },
    { name = "langchain-text-splitters", editable = "../text-splitters" },
    { name = "mypy", specifier = ">=1.15,<2.0" },
    { name = "mypy", specifier = ">=1.15,<1.16" },
    { name = "mypy-protobuf", specifier = ">=3.0.0,<4.0.0" },
    { name = "numpy", marker = "python_full_version < '3.13'", specifier = ">=1.26.4" },
    { name = "numpy", marker = "python_full_version >= '3.13'", specifier = ">=2.1.0" },
@@ -564,7 +565,7 @@ dev = [
]
lint = [
    { name = "mypy", specifier = ">=1.13.0,<2.0.0" },
    { name = "ruff", specifier = ">=0.5,<1.0" },
    { name = "ruff", specifier = ">=0.12.2,<0.13" },
]
test = [
    { name = "langchain", editable = "../langchain" },
@@ -575,7 +576,7 @@ typing = [{ name = "langchain", editable = "../langchain" }]

[[package]]
name = "langchain-core"
version = "0.3.56"
version = "0.3.68"
source = { editable = "../core" }
dependencies = [
    { name = "jsonpatch" },
@@ -590,10 +591,9 @@ dependencies = [
[package.metadata]
requires-dist = [
    { name = "jsonpatch", specifier = ">=1.33,<2.0" },
    { name = "langsmith", specifier = ">=0.1.125,<0.4" },
    { name = "langsmith", specifier = ">=0.3.45" },
    { name = "packaging", specifier = ">=23.2,<25" },
    { name = "pydantic", marker = "python_full_version < '3.12.4'", specifier = ">=2.5.2,<3.0.0" },
    { name = "pydantic", marker = "python_full_version >= '3.12.4'", specifier = ">=2.7.4,<3.0.0" },
    { name = "pydantic", specifier = ">=2.7.4" },
    { name = "pyyaml", specifier = ">=5.3" },
    { name = "tenacity", specifier = ">=8.1.0,!=8.4.0,<10.0.0" },
    { name = "typing-extensions", specifier = ">=4.7" },
@@ -634,7 +634,7 @@ typing = [

[[package]]
name = "langchain-text-splitters"
version = "0.3.8"
version = "0.3.9"
source = { editable = "../text-splitters" }
dependencies = [
    { name = "langchain-core" },
@@ -664,10 +664,10 @@ test = [
]
test-integration = [
    { name = "nltk", specifier = ">=3.9.1,<4.0.0" },
    { name = "sentence-transformers", marker = "python_full_version < '3.13'", specifier = ">=2.6.0" },
    { name = "spacy", marker = "python_full_version < '3.10'", specifier = ">=3.0.0,<3.8.4" },
    { name = "spacy", marker = "python_full_version < '3.13'", specifier = ">=3.0.0,<4.0.0" },
    { name = "transformers", specifier = ">=4.47.0,<5.0.0" },
    { name = "sentence-transformers", specifier = ">=3.0.1" },
    { name = "spacy", specifier = ">=3.8.7,<4.0.0" },
    { name = "thinc", specifier = ">=8.3.6,<9.0.0" },
    { name = "transformers", specifier = ">=4.51.3,<5.0.0" },
]
typing = [
    { name = "lxml-stubs", specifier = ">=0.5.1,<1.0.0" },
@@ -699,7 +699,7 @@ all = [

[[package]]
name = "langsmith"
version = "0.3.37"
version = "0.4.4"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "httpx" },
@@ -710,9 +710,9 @@ dependencies = [
    { name = "requests-toolbelt" },
    { name = "zstandard" },
]
sdist = { url = "https://files.pythonhosted.org/packages/7b/d0/98daffe57c57c2f44c5d363df5004d8e530b8c9b15751f451d273fd1d4c8/langsmith-0.3.37.tar.gz", hash = "sha256:d49d9a12d24d3984d5b3e2b5915b525b4a29a4706ea9cadde43c980fba43fab0", size = 344645 }
sdist = { url = "https://files.pythonhosted.org/packages/20/c8/8d2e0fc438d2d3d8d4300f7684ea30a754344ed00d7ba9cc2705241d2a5f/langsmith-0.4.4.tar.gz", hash = "sha256:70c53bbff24a7872e88e6fa0af98270f4986a6e364f9e85db1cc5636defa4d66", size = 352105 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/50/f2/5700dbeec7dca0aa57a6ed2f472fa3a323b46c85ab2bc446b2c7c8fb599e/langsmith-0.3.37-py3-none-any.whl", hash = "sha256:bdecca4eb48ba1799e821a33dbdca318ab202faa71a5bfa7d2358be6c3fd7eeb", size = 359308 },
    { url = "https://files.pythonhosted.org/packages/1d/33/a3337eb70d795495a299a1640d7a75f17fb917155a64309b96106e7b9452/langsmith-0.4.4-py3-none-any.whl", hash = "sha256:014c68329bd085bd6c770a6405c61bb6881f82eb554ce8c4d1984b0035fd1716", size = 367687 },
]

[[package]]
@@ -1135,27 +1135,27 @@ wheels = [

[[package]]
name = "ruff"
version = "0.5.7"
version = "0.12.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/bf/2b/69e5e412f9d390adbdbcbf4f64d6914fa61b44b08839a6584655014fc524/ruff-0.5.7.tar.gz", hash = "sha256:8dfc0a458797f5d9fb622dd0efc52d796f23f0a1493a9527f4e49a550ae9a7e5", size = 2449817 }
sdist = { url = "https://files.pythonhosted.org/packages/6c/3d/d9a195676f25d00dbfcf3cf95fdd4c685c497fcfa7e862a44ac5e4e96480/ruff-0.12.2.tar.gz", hash = "sha256:d7b4f55cd6f325cb7621244f19c873c565a08aff5a4ba9c69aa7355f3f7afd3e", size = 4432239 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/6b/eb/06e06aaf96af30a68e83b357b037008c54a2ddcbad4f989535007c700394/ruff-0.5.7-py3-none-linux_armv6l.whl", hash = "sha256:548992d342fc404ee2e15a242cdbea4f8e39a52f2e7752d0e4cbe88d2d2f416a", size = 9570571 },
    { url = "https://files.pythonhosted.org/packages/a4/10/1be32aeaab8728f78f673e7a47dd813222364479b2d6573dbcf0085e83ea/ruff-0.5.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:00cc8872331055ee017c4f1071a8a31ca0809ccc0657da1d154a1d2abac5c0be", size = 8685138 },
    { url = "https://files.pythonhosted.org/packages/3d/1d/c218ce83beb4394ba04d05e9aa2ae6ce9fba8405688fe878b0fdb40ce855/ruff-0.5.7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:eaf3d86a1fdac1aec8a3417a63587d93f906c678bb9ed0b796da7b59c1114a1e", size = 8266785 },
    { url = "https://files.pythonhosted.org/packages/26/79/7f49509bd844476235b40425756def366b227a9714191c91f02fb2178635/ruff-0.5.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a01c34400097b06cf8a6e61b35d6d456d5bd1ae6961542de18ec81eaf33b4cb8", size = 9983964 },
    { url = "https://files.pythonhosted.org/packages/bf/b1/939836b70bf9fcd5e5cd3ea67fdb8abb9eac7631351d32f26544034a35e4/ruff-0.5.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fcc8054f1a717e2213500edaddcf1dbb0abad40d98e1bd9d0ad364f75c763eea", size = 9359490 },
    { url = "https://files.pythonhosted.org/packages/32/7d/b3db19207de105daad0c8b704b2c6f2a011f9c07017bd58d8d6e7b8eba19/ruff-0.5.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7f70284e73f36558ef51602254451e50dd6cc479f8b6f8413a95fcb5db4a55fc", size = 10170833 },
    { url = "https://files.pythonhosted.org/packages/a2/45/eae9da55f3357a1ac04220230b8b07800bf516e6dd7e1ad20a2ff3b03b1b/ruff-0.5.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:a78ad870ae3c460394fc95437d43deb5c04b5c29297815a2a1de028903f19692", size = 10896360 },
    { url = "https://files.pythonhosted.org/packages/99/67/4388b36d145675f4c51ebec561fcd4298a0e2550c81e629116f83ce45a39/ruff-0.5.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9ccd078c66a8e419475174bfe60a69adb36ce04f8d4e91b006f1329d5cd44bcf", size = 10477094 },
    { url = "https://files.pythonhosted.org/packages/e1/9c/f5e6ed1751dc187a4ecf19a4970dd30a521c0ee66b7941c16e292a4043fb/ruff-0.5.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e31c9bad4ebf8fdb77b59cae75814440731060a09a0e0077d559a556453acbb", size = 11480896 },
    { url = "https://files.pythonhosted.org/packages/c8/3b/2b683be597bbd02046678fc3fc1c199c641512b20212073b58f173822bb3/ruff-0.5.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d796327eed8e168164346b769dd9a27a70e0298d667b4ecee6877ce8095ec8e", size = 10179702 },
    { url = "https://files.pythonhosted.org/packages/f1/38/c2d94054dc4b3d1ea4c2ba3439b2a7095f08d1c8184bc41e6abe2a688be7/ruff-0.5.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:4a09ea2c3f7778cc635e7f6edf57d566a8ee8f485f3c4454db7771efb692c499", size = 9982855 },
    { url = "https://files.pythonhosted.org/packages/7d/e7/1433db2da505ffa8912dcf5b28a8743012ee780cbc20ad0bf114787385d9/ruff-0.5.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:a36d8dcf55b3a3bc353270d544fb170d75d2dff41eba5df57b4e0b67a95bb64e", size = 9433156 },
    { url = "https://files.pythonhosted.org/packages/e0/36/4fa43250e67741edeea3d366f59a1dc993d4d89ad493a36cbaa9889895f2/ruff-0.5.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:9369c218f789eefbd1b8d82a8cf25017b523ac47d96b2f531eba73770971c9e5", size = 9782971 },
    { url = "https://files.pythonhosted.org/packages/80/0e/8c276103d518e5cf9202f70630aaa494abf6fc71c04d87c08b6d3cd07a4b/ruff-0.5.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:b88ca3db7eb377eb24fb7c82840546fb7acef75af4a74bd36e9ceb37a890257e", size = 10247775 },
    { url = "https://files.pythonhosted.org/packages/cb/b9/673096d61276f39291b729dddde23c831a5833d98048349835782688a0ec/ruff-0.5.7-py3-none-win32.whl", hash = "sha256:33d61fc0e902198a3e55719f4be6b375b28f860b09c281e4bdbf783c0566576a", size = 7841772 },
    { url = "https://files.pythonhosted.org/packages/67/1c/4520c98bfc06b9c73cd1457686d4d3935d40046b1ddea08403e5a6deff51/ruff-0.5.7-py3-none-win_amd64.whl", hash = "sha256:083bbcbe6fadb93cd86709037acc510f86eed5a314203079df174c40bbbca6b3", size = 8699779 },
    { url = "https://files.pythonhosted.org/packages/38/23/b3763a237d2523d40a31fe2d1a301191fe392dd48d3014977d079cf8c0bd/ruff-0.5.7-py3-none-win_arm64.whl", hash = "sha256:2dca26154ff9571995107221d0aeaad0e75a77b5a682d6236cf89a58c70b76f4", size = 8091891 },
    { url = "https://files.pythonhosted.org/packages/74/b6/2098d0126d2d3318fd5bec3ad40d06c25d377d95749f7a0c5af17129b3b1/ruff-0.12.2-py3-none-linux_armv6l.whl", hash = "sha256:093ea2b221df1d2b8e7ad92fc6ffdca40a2cb10d8564477a987b44fd4008a7be", size = 10369761 },
    { url = "https://files.pythonhosted.org/packages/b1/4b/5da0142033dbe155dc598cfb99262d8ee2449d76920ea92c4eeb9547c208/ruff-0.12.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:09e4cf27cc10f96b1708100fa851e0daf21767e9709e1649175355280e0d950e", size = 11155659 },
    { url = "https://files.pythonhosted.org/packages/3e/21/967b82550a503d7c5c5c127d11c935344b35e8c521f52915fc858fb3e473/ruff-0.12.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:8ae64755b22f4ff85e9c52d1f82644abd0b6b6b6deedceb74bd71f35c24044cc", size = 10537769 },
    { url = "https://files.pythonhosted.org/packages/33/91/00cff7102e2ec71a4890fb7ba1803f2cdb122d82787c7d7cf8041fe8cbc1/ruff-0.12.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3eb3a6b2db4d6e2c77e682f0b988d4d61aff06860158fdb413118ca133d57922", size = 10717602 },
    { url = "https://files.pythonhosted.org/packages/9b/eb/928814daec4e1ba9115858adcda44a637fb9010618721937491e4e2283b8/ruff-0.12.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:73448de992d05517170fc37169cbca857dfeaeaa8c2b9be494d7bcb0d36c8f4b", size = 10198772 },
    { url = "https://files.pythonhosted.org/packages/50/fa/f15089bc20c40f4f72334f9145dde55ab2b680e51afb3b55422effbf2fb6/ruff-0.12.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3b8b94317cbc2ae4a2771af641739f933934b03555e51515e6e021c64441532d", size = 11845173 },
    { url = "https://files.pythonhosted.org/packages/43/9f/1f6f98f39f2b9302acc161a4a2187b1e3a97634fe918a8e731e591841cf4/ruff-0.12.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:45fc42c3bf1d30d2008023a0a9a0cfb06bf9835b147f11fe0679f21ae86d34b1", size = 12553002 },
    { url = "https://files.pythonhosted.org/packages/d8/70/08991ac46e38ddd231c8f4fd05ef189b1b94be8883e8c0c146a025c20a19/ruff-0.12.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce48f675c394c37e958bf229fb5c1e843e20945a6d962cf3ea20b7a107dcd9f4", size = 12171330 },
    { url = "https://files.pythonhosted.org/packages/88/a9/5a55266fec474acfd0a1c73285f19dd22461d95a538f29bba02edd07a5d9/ruff-0.12.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:793d8859445ea47591272021a81391350205a4af65a9392401f418a95dfb75c9", size = 11774717 },
    { url = "https://files.pythonhosted.org/packages/87/e5/0c270e458fc73c46c0d0f7cf970bb14786e5fdb88c87b5e423a4bd65232b/ruff-0.12.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6932323db80484dda89153da3d8e58164d01d6da86857c79f1961934354992da", size = 11646659 },
    { url = "https://files.pythonhosted.org/packages/b7/b6/45ab96070c9752af37f0be364d849ed70e9ccede07675b0ec4e3ef76b63b/ruff-0.12.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:6aa7e623a3a11538108f61e859ebf016c4f14a7e6e4eba1980190cacb57714ce", size = 10604012 },
    { url = "https://files.pythonhosted.org/packages/86/91/26a6e6a424eb147cc7627eebae095cfa0b4b337a7c1c413c447c9ebb72fd/ruff-0.12.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:2a4a20aeed74671b2def096bdf2eac610c7d8ffcbf4fb0e627c06947a1d7078d", size = 10176799 },
    { url = "https://files.pythonhosted.org/packages/f5/0c/9f344583465a61c8918a7cda604226e77b2c548daf8ef7c2bfccf2b37200/ruff-0.12.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:71a4c550195612f486c9d1f2b045a600aeba851b298c667807ae933478fcef04", size = 11241507 },
    { url = "https://files.pythonhosted.org/packages/1c/b7/99c34ded8fb5f86c0280278fa89a0066c3760edc326e935ce0b1550d315d/ruff-0.12.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:4987b8f4ceadf597c927beee65a5eaf994c6e2b631df963f86d8ad1bdea99342", size = 11717609 },
    { url = "https://files.pythonhosted.org/packages/51/de/8589fa724590faa057e5a6d171e7f2f6cffe3287406ef40e49c682c07d89/ruff-0.12.2-py3-none-win32.whl", hash = "sha256:369ffb69b70cd55b6c3fc453b9492d98aed98062db9fec828cdfd069555f5f1a", size = 10523823 },
    { url = "https://files.pythonhosted.org/packages/94/47/8abf129102ae4c90cba0c2199a1a9b0fa896f6f806238d6f8c14448cc748/ruff-0.12.2-py3-none-win_amd64.whl", hash = "sha256:dca8a3b6d6dc9810ed8f328d406516bf4d660c00caeaef36eb831cf4871b0639", size = 11629831 },
    { url = "https://files.pythonhosted.org/packages/e2/1f/72d2946e3cc7456bb837e88000eb3437e55f80db339c840c04015a11115d/ruff-0.12.2-py3-none-win_arm64.whl", hash = "sha256:48d6c6bfb4761df68bc05ae630e24f506755e702d4fb08f08460be778c7ccb12", size = 10735334 },
]

[[package]]
@@ -469,7 +469,7 @@ def warn_deprecated(

    if not message:
        message = ""
        _package = (
        package_ = (
            package or name.split(".")[0].replace("_", "-")
            if "." in name
            else "LangChain"
@@ -483,14 +483,14 @@ def warn_deprecated(
        if pending:
            message += " will be deprecated in a future version"
        else:
            message += f" was deprecated in {_package} {since}"
            message += f" was deprecated in {package_} {since}"

        if removal:
            message += f" and will be removed {removal}"

        if alternative_import:
            alt_package = alternative_import.split(".")[0].replace("_", "-")
            if alt_package == _package:
            if alt_package == package_:
                message += f". Use {alternative_import} instead."
            else:
                alt_module, alt_name = alternative_import.rsplit(".", 1)
@@ -194,7 +194,7 @@ class InMemoryCache(BaseCache):
        """
        if self._maxsize is not None and len(self._cache) == self._maxsize:
            del self._cache[next(iter(self._cache))]
        self._cache[(prompt, llm_string)] = return_val
        self._cache[prompt, llm_string] = return_val

    @override
    def clear(self, **kwargs: Any) -> None:
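The eviction line above leans on the fact that Python dicts preserve insertion order, so next(iter(cache)) is always the oldest key. A standalone sketch of that FIFO idiom (not the langchain-core class itself; names are illustrative):

# FIFO eviction via dict insertion order; mirrors the pattern above.
cache: dict[tuple[str, str], str] = {}
maxsize = 2

def put(key: tuple[str, str], value: str) -> None:
    if len(cache) == maxsize:
        del cache[next(iter(cache))]  # drop the oldest entry
    cache[key] = value

put(("prompt-1", "llm"), "a")
put(("prompt-2", "llm"), "b")
put(("prompt-3", "llm"), "c")  # evicts ("prompt-1", "llm")
assert ("prompt-1", "llm") not in cache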
@@ -243,9 +243,10 @@ class CallbackManagerMixin:
    ) -> Any:
        """Run when LLM starts running.

        **ATTENTION**: This method is called for non-chat models (regular LLMs). If
            you're implementing a handler for a chat model,
            you should use on_chat_model_start instead.
        .. ATTENTION::
            This method is called for non-chat models (regular LLMs). If you're
            implementing a handler for a chat model, you should use
            ``on_chat_model_start`` instead.

        Args:
            serialized (dict[str, Any]): The serialized LLM.
@@ -271,7 +272,7 @@ class CallbackManagerMixin:
        """Run when a chat model starts running.

        **ATTENTION**: This method is called for chat models. If you're implementing
            a handler for a non-chat model, you should use on_llm_start instead.
            a handler for a non-chat model, you should use ``on_llm_start`` instead.

        Args:
            serialized (dict[str, Any]): The serialized chat model.
@@ -490,9 +491,10 @@ class AsyncCallbackHandler(BaseCallbackHandler):
    ) -> None:
        """Run when LLM starts running.

        **ATTENTION**: This method is called for non-chat models (regular LLMs). If
            you're implementing a handler for a chat model,
            you should use on_chat_model_start instead.
        .. ATTENTION::
            This method is called for non-chat models (regular LLMs). If you're
            implementing a handler for a chat model, you should use
            ``on_chat_model_start`` instead.

        Args:
            serialized (dict[str, Any]): The serialized LLM.
@@ -518,7 +520,7 @@ class AsyncCallbackHandler(BaseCallbackHandler):
        """Run when a chat model starts running.

        **ATTENTION**: This method is called for chat models. If you're implementing
            a handler for a non-chat model, you should use on_llm_start instead.
            a handler for a non-chat model, you should use ``on_llm_start`` instead.

        Args:
            serialized (dict[str, Any]): The serialized chat model.
@@ -5,8 +5,9 @@ from __future__ import annotations
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING, Any, Optional, TextIO, cast
|
||||
|
||||
from typing_extensions import override
|
||||
from typing_extensions import Self, override
|
||||
|
||||
from langchain_core._api import warn_deprecated
|
||||
from langchain_core.callbacks import BaseCallbackHandler
|
||||
from langchain_core.utils.input import print_text
|
||||
|
||||
@@ -14,78 +15,184 @@ if TYPE_CHECKING:
|
||||
from langchain_core.agents import AgentAction, AgentFinish
|
||||
|
||||
|
||||
_GLOBAL_DEPRECATION_WARNED = False
|
||||
|
||||
|
||||
class FileCallbackHandler(BaseCallbackHandler):
|
||||
"""Callback Handler that writes to a file.
|
||||
|
||||
Parameters:
|
||||
filename: The file to write to.
|
||||
mode: The mode to open the file in. Defaults to "a".
|
||||
color: The color to use for the text.
|
||||
This handler supports both context manager usage (recommended) and direct
|
||||
instantiation (deprecated) for backwards compatibility.
|
||||
|
||||
Examples:
|
||||
Using as a context manager (recommended):
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
with FileCallbackHandler("output.txt") as handler:
|
||||
# Use handler with your chain/agent
|
||||
chain.invoke(inputs, config={"callbacks": [handler]})
|
||||
|
||||
Direct instantiation (deprecated):
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
handler = FileCallbackHandler("output.txt")
|
||||
# File remains open until handler is garbage collected
|
||||
try:
|
||||
chain.invoke(inputs, config={"callbacks": [handler]})
|
||||
finally:
|
||||
handler.close() # Explicit cleanup recommended
|
||||
|
||||
Args:
|
||||
filename: The file path to write to.
|
||||
mode: The file open mode. Defaults to ``'a'`` (append).
|
||||
color: Default color for text output. Defaults to ``None``.
|
||||
|
||||
Note:
|
||||
When not used as a context manager, a deprecation warning will be issued
|
||||
on first use. The file will be opened immediately in ``__init__`` and closed
|
||||
in ``__del__`` or when ``close()`` is called explicitly.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self, filename: str, mode: str = "a", color: Optional[str] = None
|
||||
) -> None:
|
||||
"""Initialize callback handler.
|
||||
"""Initialize the file callback handler.
|
||||
|
||||
Args:
|
||||
filename: The filename to write to.
|
||||
mode: The mode to open the file in. Defaults to "a".
|
||||
color: The color to use for the text. Defaults to None.
|
||||
filename: Path to the output file.
|
||||
mode: File open mode (e.g., ``'w'``, ``'a'``, ``'x'``). Defaults to ``'a'``.
|
||||
color: Default text color for output. Defaults to ``None``.
|
||||
"""
|
||||
self.file = cast("TextIO", Path(filename).open(mode, encoding="utf-8")) # noqa: SIM115
|
||||
self.filename = filename
|
||||
self.mode = mode
|
||||
self.color = color
|
||||
self._file_opened_in_context = False
|
||||
self.file: TextIO = cast(
|
||||
"TextIO",
|
||||
# Open the file in the specified mode with UTF-8 encoding.
|
||||
Path(self.filename).open(self.mode, encoding="utf-8"), # noqa: SIM115
|
||||
)
|
||||
|
||||
def __enter__(self) -> Self:
|
||||
"""Enter the context manager.
|
||||
|
||||
Returns:
|
||||
The FileCallbackHandler instance.
|
||||
|
||||
Note:
|
||||
The file is already opened in ``__init__``, so this just marks that
|
||||
the handler is being used as a context manager.
|
||||
"""
|
||||
self._file_opened_in_context = True
|
||||
return self
|
||||
|
||||
def __exit__(
|
||||
self,
|
||||
exc_type: type[BaseException] | None,
|
||||
exc_val: BaseException | None,
|
||||
exc_tb: object,
|
||||
) -> None:
|
||||
"""Exit the context manager and close the file.
|
||||
|
||||
Args:
|
||||
exc_type: Exception type if an exception occurred.
|
||||
exc_val: Exception value if an exception occurred.
|
||||
exc_tb: Exception traceback if an exception occurred.
|
||||
"""
|
||||
self.close()
|
||||
|
||||
def __del__(self) -> None:
|
||||
"""Destructor to cleanup when done."""
|
||||
self.file.close()
|
||||
self.close()
|
||||
|
||||
    def close(self) -> None:
        """Close the file if it's open.

        This method is safe to call multiple times and will only close
        the file if it's currently open.
        """
        if hasattr(self, "file") and self.file and not self.file.closed:
            self.file.close()

    def _write(
        self,
        text: str,
        color: Optional[str] = None,
        end: str = "",
    ) -> None:
        """Write text to the file with deprecation warning if needed.

        Args:
            text: The text to write to the file.
            color: Optional color for the text. Defaults to ``self.color``.
            end: String appended after the text. Defaults to ``""``.

        Raises:
            RuntimeError: If the file is closed or not available.
        """
        global _GLOBAL_DEPRECATION_WARNED  # noqa: PLW0603
        if not self._file_opened_in_context and not _GLOBAL_DEPRECATION_WARNED:
            warn_deprecated(
                since="0.3.67",
                pending=True,
                message=(
                    "Using FileCallbackHandler without a context manager is "
                    "deprecated. Use 'with FileCallbackHandler(...) as "
                    "handler:' instead."
                ),
            )
            _GLOBAL_DEPRECATION_WARNED = True

        if not hasattr(self, "file") or self.file is None or self.file.closed:
            msg = "File is not open. Use FileCallbackHandler as a context manager."
            raise RuntimeError(msg)

        print_text(text, file=self.file, color=color, end=end)

    @override
    def on_chain_start(
        self, serialized: dict[str, Any], inputs: dict[str, Any], **kwargs: Any
    ) -> None:
        """Print out that we are entering a chain.
        """Print that we are entering a chain.

        Args:
            serialized (dict[str, Any]): The serialized chain.
            inputs (dict[str, Any]): The inputs to the chain.
            **kwargs (Any): Additional keyword arguments.
            serialized: The serialized chain information.
            inputs: The inputs to the chain.
            **kwargs: Additional keyword arguments that may contain ``'name'``.
        """
        if "name" in kwargs:
            name = kwargs["name"]
        elif serialized:
            name = serialized.get("name", serialized.get("id", ["<unknown>"])[-1])
        else:
            name = "<unknown>"
        print_text(
            f"\n\n\033[1m> Entering new {name} chain...\033[0m",
            end="\n",
            file=self.file,
        name = (
            kwargs.get("name")
            or serialized.get("name", serialized.get("id", ["<unknown>"])[-1])
            or "<unknown>"
        )
        self._write(f"\n\n> Entering new {name} chain...", end="\n")

    @override
    def on_chain_end(self, outputs: dict[str, Any], **kwargs: Any) -> None:
        """Print out that we finished a chain.
        """Print that we finished a chain.

        Args:
            outputs (dict[str, Any]): The outputs of the chain.
            **kwargs (Any): Additional keyword arguments.
            outputs: The outputs of the chain.
            **kwargs: Additional keyword arguments.
        """
        print_text("\n\033[1m> Finished chain.\033[0m", end="\n", file=self.file)
        self._write("\n> Finished chain.", end="\n")

    @override
    def on_agent_action(
        self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
    ) -> Any:
        """Run on agent action.
        """Handle agent action by writing the action log.

        Args:
            action (AgentAction): The agent action.
            color (Optional[str], optional): The color to use for the text.
                Defaults to None.
            **kwargs (Any): Additional keyword arguments.
            action: The agent action containing the log to write.
            color: Color override for this specific output. If ``None``, uses
                ``self.color``.
            **kwargs: Additional keyword arguments.
        """
        print_text(action.log, color=color or self.color, file=self.file)
        self._write(action.log, color=color or self.color)

    @override
    def on_tool_end(
@@ -96,49 +203,47 @@ class FileCallbackHandler(BaseCallbackHandler):
        llm_prefix: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """If not the final action, print out observation.
        """Handle tool end by writing the output with optional prefixes.

        Args:
            output (str): The output to print.
            color (Optional[str], optional): The color to use for the text.
                Defaults to None.
            observation_prefix (Optional[str], optional): The observation prefix.
                Defaults to None.
            llm_prefix (Optional[str], optional): The LLM prefix.
                Defaults to None.
            **kwargs (Any): Additional keyword arguments.
            output: The tool output to write.
            color: Color override for this specific output. If ``None``, uses
                ``self.color``.
            observation_prefix: Optional prefix to write before the output.
            llm_prefix: Optional prefix to write after the output.
            **kwargs: Additional keyword arguments.
        """
        if observation_prefix is not None:
            print_text(f"\n{observation_prefix}", file=self.file)
        print_text(output, color=color or self.color, file=self.file)
            self._write(f"\n{observation_prefix}")
        self._write(output)
        if llm_prefix is not None:
            print_text(f"\n{llm_prefix}", file=self.file)
            self._write(f"\n{llm_prefix}")

    @override
    def on_text(
        self, text: str, color: Optional[str] = None, end: str = "", **kwargs: Any
    ) -> None:
        """Run when the agent ends.
        """Handle text output.

        Args:
            text (str): The text to print.
            color (Optional[str], optional): The color to use for the text.
                Defaults to None.
            end (str, optional): The end character. Defaults to "".
            **kwargs (Any): Additional keyword arguments.
            text: The text to write.
            color: Color override for this specific output. If ``None``, uses
                ``self.color``.
            end: String appended after the text. Defaults to ``""``.
            **kwargs: Additional keyword arguments.
        """
        print_text(text, color=color or self.color, end=end, file=self.file)
        self._write(text, color=color or self.color, end=end)

    @override
    def on_agent_finish(
        self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
    ) -> None:
        """Run on the agent end.
        """Handle agent finish by writing the finish log.

        Args:
            finish (AgentFinish): The agent finish.
            color (Optional[str], optional): The color to use for the text.
                Defaults to None.
            **kwargs (Any): Additional keyword arguments.
            finish: The agent finish object containing the log to write.
            color: Color override for this specific output. If ``None``, uses
                ``self.color``.
            **kwargs: Additional keyword arguments.
        """
        print_text(finish.log, color=color or self.color, end="\n", file=self.file)
        self._write(finish.log, color=color or self.color, end="\n")

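Taken together, the changes above give ``FileCallbackHandler`` a context-manager lifecycle plus a one-time, process-wide warning for the legacy path. A minimal sketch of both behaviors (assuming ``langchain_core`` is installed; the handler methods are the ones shown in this diff):

.. code-block:: python

    import warnings

    from langchain_core.callbacks import FileCallbackHandler

    # Recommended: __exit__ calls close(), so the file is released deterministically.
    with FileCallbackHandler("run.log") as handler:
        handler.on_chain_start({"id": ["langchain", "chains", "LLMChain"]}, {"q": "hi"})
        handler.on_chain_end({"answer": "hello"})

    # Deprecated path: the first _write() warns once per process, because
    # _GLOBAL_DEPRECATION_WARNED is a module-level flag.
    legacy = FileCallbackHandler("run.log")
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        legacy.on_text("first")   # emits the deprecation warning
        legacy.on_text("second")  # flag already set: no second warning
    legacy.close()
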
@@ -1066,7 +1066,7 @@ class CallbackManagerForToolRun(ParentRunManager, ToolManagerMixin):

        Args:
            output (Any): The output of the tool.
            **kwargs (Any): Additional keyword arguments.
            **kwargs (Any): The keyword arguments to pass to the event handler.
        """
        if not self.handlers:
            return
@@ -1470,7 +1470,7 @@ class CallbackManager(BaseCallbackManager):
                input is needed.
                If provided, the inputs are expected to be formatted as a dict.
                The keys will correspond to the named-arguments in the tool.
            **kwargs (Any): Additional keyword arguments.
            **kwargs (Any): The keyword arguments to pass to the event handler.

        Returns:
            CallbackManagerForToolRun: The callback manager for the tool run.

@@ -65,28 +65,32 @@ class BaseChatMessageHistory(ABC):
    .. code-block:: python

        class FileChatMessageHistory(BaseChatMessageHistory):
            storage_path: str
            storage_path: str
            session_id: str

            @property
            def messages(self):
                with open(os.path.join(storage_path, session_id), 'r:utf-8') as f:
                    messages = json.loads(f.read())
            @property
            def messages(self):
                with open(
                    os.path.join(storage_path, session_id),
                    "r",
                    encoding="utf-8",
                ) as f:
                    messages = json.loads(f.read())
                return messages_from_dict(messages)

            def add_messages(self, messages: Sequence[BaseMessage]) -> None:
                all_messages = list(self.messages)  # Existing messages
                all_messages.extend(messages)  # Add new messages
            def add_messages(self, messages: Sequence[BaseMessage]) -> None:
                all_messages = list(self.messages)  # Existing messages
                all_messages.extend(messages)  # Add new messages

                serialized = [message_to_dict(message) for message in all_messages]
                # Can be further optimized by only writing new messages
                # using append mode.
                with open(os.path.join(storage_path, session_id), 'w') as f:
                    json.dump(f, messages)
                serialized = [message_to_dict(message) for message in all_messages]
                # Can be further optimized by only writing new messages
                # using append mode.
                with open(os.path.join(storage_path, session_id), "w") as f:
                    json.dump(serialized, f)

            def clear(self):
                with open(os.path.join(storage_path, session_id), 'w') as f:
                    f.write("[]")
            def clear(self):
                with open(os.path.join(storage_path, session_id), "w") as f:
                    f.write("[]")
    """

    messages: list[BaseMessage]

@@ -60,11 +60,11 @@ class BaseLoader(ABC):  # noqa: B024
                )
                raise ImportError(msg) from e

            _text_splitter: TextSplitter = RecursiveCharacterTextSplitter()
            text_splitter_: TextSplitter = RecursiveCharacterTextSplitter()
        else:
            _text_splitter = text_splitter
            text_splitter_ = text_splitter
        docs = self.load()
        return _text_splitter.split_documents(docs)
        return text_splitter_.split_documents(docs)

    # Attention: This method will be upgraded into an abstractmethod once it's
    # implemented in all the existing subclasses.

@@ -201,14 +201,14 @@ class Blob(BaseMedia):
            Blob instance
        """
        if mime_type is None and guess_type:
            _mimetype = mimetypes.guess_type(path)[0] if guess_type else None
            mimetype = mimetypes.guess_type(path)[0] if guess_type else None
        else:
            _mimetype = mime_type
            mimetype = mime_type
        # We do not load the data immediately, instead we treat the blob as a
        # reference to the underlying data.
        return cls(
            data=None,
            mimetype=_mimetype,
            mimetype=mimetype,
            encoding=encoding,
            path=path,
            metadata=metadata if metadata is not None else {},

@@ -273,7 +273,7 @@ def index(
    vector_store: Union[VectorStore, DocumentIndex],
    *,
    batch_size: int = 100,
    cleanup: Literal["incremental", "full", "scoped_full", None] = None,
    cleanup: Optional[Literal["incremental", "full", "scoped_full"]] = None,
    source_id_key: Union[str, Callable[[Document], str], None] = None,
    cleanup_batch_size: int = 1_000,
    force_update: bool = False,
@@ -540,10 +540,10 @@
        )
        raise AssertionError(msg)

    _source_ids = cast("Sequence[str]", source_ids)
    source_ids_ = cast("Sequence[str]", source_ids)

    while uids_to_delete := record_manager.list_keys(
        group_ids=_source_ids, before=index_start_dt, limit=cleanup_batch_size
        group_ids=source_ids_, before=index_start_dt, limit=cleanup_batch_size
    ):
        # Then delete from vector store.
        _delete(destination, uids_to_delete)
@@ -609,7 +609,7 @@ async def aindex(
    vector_store: Union[VectorStore, DocumentIndex],
    *,
    batch_size: int = 100,
    cleanup: Literal["incremental", "full", "scoped_full", None] = None,
    cleanup: Optional[Literal["incremental", "full", "scoped_full"]] = None,
    source_id_key: Union[str, Callable[[Document], str], None] = None,
    cleanup_batch_size: int = 1_000,
    force_update: bool = False,
@@ -881,10 +881,10 @@ async def aindex(
        )
        raise AssertionError(msg)

    _source_ids = cast("Sequence[str]", source_ids)
    source_ids_ = cast("Sequence[str]", source_ids)

    while uids_to_delete := await record_manager.alist_keys(
        group_ids=_source_ids, before=index_start_dt, limit=cleanup_batch_size
        group_ids=source_ids_, before=index_start_dt, limit=cleanup_batch_size
    ):
        # Then delete from vector store.
        await _adelete(destination, uids_to_delete)

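The ``cleanup`` annotation change in both ``index`` and ``aindex`` is purely stylistic: the two spellings admit exactly the same values. A quick self-contained check (the names ``Old`` and ``New`` are mine, for illustration only):

.. code-block:: python

    from typing import Literal, Optional, get_args

    Old = Literal["incremental", "full", "scoped_full", None]
    New = Optional[Literal["incremental", "full", "scoped_full"]]

    # Both annotations flatten to the same set of permitted values.
    assert set(get_args(Old)) == {*get_args(get_args(New)[0]), None}
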
@@ -123,7 +123,7 @@ def _normalize_messages(messages: Sequence[BaseMessage]) -> list[BaseMessage]:
            # Subset to (PDF) files and audio, as most relevant chat models
            # support images in OAI format (and some may not yet support the
            # standard data block format)
            and block.get("type") in ("file", "input_audio")
            and block.get("type") in {"file", "input_audio"}
            and _is_openai_data_block(block)
        ):
            if formatted_message is message:

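The tuple-to-set change in the membership test is behavior-preserving: a set literal of constants signals "membership test" and gives a hashed lookup instead of a linear scan. For example:

.. code-block:: python

    block = {"type": "input_audio", "data": "..."}
    assert block.get("type") in {"file", "input_audio"}
    assert block.get("type") in ("file", "input_audio")  # same result, linear scan
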
@@ -130,7 +130,7 @@ class BaseLanguageModel(
    )

    @field_validator("verbose", mode="before")
    def set_verbose(cls, verbose: Optional[bool]) -> bool:
    def set_verbose(cls, verbose: Optional[bool]) -> bool:  # noqa: FBT001
        """If verbose is None, set it.

        This allows users to pass in None as verbose to access the global setting.

@@ -311,6 +311,18 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
        does not properly support streaming.
    """

    output_version: str = "v0"
    """Version of AIMessage output format to use.

    This field is used to roll out new output formats for chat model AIMessages
    in a backwards-compatible way.

    All chat models currently support the default of ``"v0"``. Chat model subclasses
    can override with (customizable) supported values.

    .. versionadded:: 0.3.68
    """

    @model_validator(mode="before")
    @classmethod
    def raise_deprecation(cls, values: dict) -> Any:
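``output_version`` is an ordinary pydantic field with a default, so the roll-out mechanism is just per-instance override. A stand-in sketch of the field semantics (``BaseChatModel`` itself is abstract, so a plain pydantic model is used here for illustration):

.. code-block:: python

    from pydantic import BaseModel

    class DemoModel(BaseModel):
        output_version: str = "v0"  # mirrors the new BaseChatModel field

    assert DemoModel().output_version == "v0"
    assert DemoModel(output_version="v1").output_version == "v1"
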
@@ -1263,8 +1275,8 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
        Returns:
            The predicted output string.
        """
        _stop = None if stop is None else list(stop)
        result = self([HumanMessage(content=text)], stop=_stop, **kwargs)
        stop_ = None if stop is None else list(stop)
        result = self([HumanMessage(content=text)], stop=stop_, **kwargs)
        if isinstance(result.content, str):
            return result.content
        msg = "Cannot use predict when output is not a string."
@@ -1279,17 +1291,17 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
        stop: Optional[Sequence[str]] = None,
        **kwargs: Any,
    ) -> BaseMessage:
        _stop = None if stop is None else list(stop)
        return self(messages, stop=_stop, **kwargs)
        stop_ = None if stop is None else list(stop)
        return self(messages, stop=stop_, **kwargs)

@deprecated("0.1.7", alternative="ainvoke", removal="1.0")
|
||||
@override
|
||||
async def apredict(
|
||||
self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
|
||||
) -> str:
|
||||
_stop = None if stop is None else list(stop)
|
||||
stop_ = None if stop is None else list(stop)
|
||||
result = await self._call_async(
|
||||
[HumanMessage(content=text)], stop=_stop, **kwargs
|
||||
[HumanMessage(content=text)], stop=stop_, **kwargs
|
||||
)
|
||||
if isinstance(result.content, str):
|
||||
return result.content
|
||||
@@ -1305,8 +1317,8 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
|
||||
stop: Optional[Sequence[str]] = None,
|
||||
**kwargs: Any,
|
||||
) -> BaseMessage:
|
||||
_stop = None if stop is None else list(stop)
|
||||
return await self._call_async(messages, stop=_stop, **kwargs)
|
||||
stop_ = None if stop is None else list(stop)
|
||||
return await self._call_async(messages, stop=stop_, **kwargs)
|
||||
|
||||
@property
|
||||
@abstractmethod
|
||||
|
||||
@@ -93,10 +93,10 @@ def create_base_retry_decorator(
    Raises:
        ValueError: If the cache is not set and cache is True.
    """
    _logging = before_sleep_log(logger, logging.WARNING)
    logging_ = before_sleep_log(logger, logging.WARNING)

    def _before_sleep(retry_state: RetryCallState) -> None:
        _logging(retry_state)
        logging_(retry_state)
        if run_manager:
            if isinstance(run_manager, AsyncCallbackManagerForLLMRun):
                coro = run_manager.on_retry(retry_state)
@@ -119,7 +119,7 @@ def create_base_retry_decorator(
    # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
    retry_instance: retry_base = retry_if_exception_type(error_types[0])
    for error in error_types[1:]:
        retry_instance = retry_instance | retry_if_exception_type(error)
        retry_instance |= retry_if_exception_type(error)
    return retry(
        reraise=True,
        stop=stop_after_attempt(max_retries),
@@ -129,7 +129,7 @@ def create_base_retry_decorator(
    )


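The ``retry_instance |= ...`` rewrite works because tenacity retry conditions overload ``|`` as "retry if any condition matches", so the augmented assignment is equivalent to ``a = a | b``. A short sketch of the same composition pattern (assuming ``tenacity`` is installed):

.. code-block:: python

    from tenacity import retry, retry_if_exception_type, stop_after_attempt

    retry_condition = retry_if_exception_type(ValueError)
    for exc in (KeyError, TimeoutError):
        retry_condition |= retry_if_exception_type(exc)

    @retry(reraise=True, stop=stop_after_attempt(3), retry=retry_condition)
    def flaky() -> None:
        raise KeyError("transient")  # retried twice more, then re-raised
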
def _resolve_cache(cache: Union[BaseCache, bool, None]) -> Optional[BaseCache]:
def _resolve_cache(*, cache: Union[BaseCache, bool, None]) -> Optional[BaseCache]:
    """Resolve the cache."""
    if isinstance(cache, BaseCache):
        llm_cache = cache
@@ -155,7 +155,7 @@ def _resolve_cache(cache: Union[BaseCache, bool, None]) -> Optional[BaseCache]:
def get_prompts(
    params: dict[str, Any],
    prompts: list[str],
    cache: Optional[Union[BaseCache, bool, None]] = None,
    cache: Union[BaseCache, bool, None] = None,  # noqa: FBT001
) -> tuple[dict[int, list], str, list[int], list[str]]:
    """Get prompts that are already cached.

@@ -176,7 +176,7 @@ def get_prompts(
    missing_prompt_idxs = []
    existing_prompts = {}

    llm_cache = _resolve_cache(cache)
    llm_cache = _resolve_cache(cache=cache)
    for i, prompt in enumerate(prompts):
        if llm_cache:
            cache_val = llm_cache.lookup(prompt, llm_string)
@@ -191,7 +191,7 @@ def get_prompts(
async def aget_prompts(
    params: dict[str, Any],
    prompts: list[str],
    cache: Optional[Union[BaseCache, bool, None]] = None,
    cache: Union[BaseCache, bool, None] = None,  # noqa: FBT001
) -> tuple[dict[int, list], str, list[int], list[str]]:
    """Get prompts that are already cached. Async version.

@@ -211,7 +211,7 @@ async def aget_prompts(
    missing_prompts = []
    missing_prompt_idxs = []
    existing_prompts = {}
    llm_cache = _resolve_cache(cache)
    llm_cache = _resolve_cache(cache=cache)
    for i, prompt in enumerate(prompts):
        if llm_cache:
            cache_val = await llm_cache.alookup(prompt, llm_string)
@@ -224,7 +224,7 @@ async def aget_prompts(


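The bare ``*`` added to ``_resolve_cache`` makes ``cache`` keyword-only, which is why every call site in this diff changes from ``_resolve_cache(cache)`` to ``_resolve_cache(cache=cache)``. The mechanism in miniature (a generic sketch, not the langchain function itself):

.. code-block:: python

    def resolve(*, cache=None):
        """Parameters after the bare * must be passed by keyword."""
        return cache

    resolve(cache=True)  # OK
    try:
        resolve(True)  # positional call now fails
    except TypeError as err:
        print(err)  # resolve() takes 0 positional arguments but 1 was given
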
def update_cache(
    cache: Union[BaseCache, bool, None],
    cache: Union[BaseCache, bool, None],  # noqa: FBT001
    existing_prompts: dict[int, list],
    llm_string: str,
    missing_prompt_idxs: list[int],
@@ -247,7 +247,7 @@ def update_cache(
    Raises:
        ValueError: If the cache is not set and cache is True.
    """
    llm_cache = _resolve_cache(cache)
    llm_cache = _resolve_cache(cache=cache)
    for i, result in enumerate(new_results.generations):
        existing_prompts[missing_prompt_idxs[i]] = result
        prompt = prompts[missing_prompt_idxs[i]]
@@ -257,7 +257,7 @@ def update_cache(


async def aupdate_cache(
    cache: Union[BaseCache, bool, None],
    cache: Union[BaseCache, bool, None],  # noqa: FBT001
    existing_prompts: dict[int, list],
    llm_string: str,
    missing_prompt_idxs: list[int],
@@ -280,7 +280,7 @@ async def aupdate_cache(
    Raises:
        ValueError: If the cache is not set and cache is True.
    """
    llm_cache = _resolve_cache(cache)
    llm_cache = _resolve_cache(cache=cache)
    for i, result in enumerate(new_results.generations):
        existing_prompts[missing_prompt_idxs[i]] = result
        prompt = prompts[missing_prompt_idxs[i]]
@@ -877,8 +877,6 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                **(metadata or {}),
                **self._get_ls_params(stop=stop, **kwargs),
            }
        else:
            pass
        if (
            isinstance(callbacks, list)
            and callbacks
@@ -1132,8 +1130,6 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                **(metadata or {}),
                **self._get_ls_params(stop=stop, **kwargs),
            }
        else:
            pass
        # Create callback managers
        if isinstance(callbacks, list) and (
            isinstance(callbacks[0], (list, BaseCallbackManager))
@@ -1352,8 +1348,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
    def predict(
        self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
    ) -> str:
        _stop = None if stop is None else list(stop)
        return self(text, stop=_stop, **kwargs)
        stop_ = None if stop is None else list(stop)
        return self(text, stop=stop_, **kwargs)

@deprecated("0.1.7", alternative="invoke", removal="1.0")
|
||||
@override
|
||||
@@ -1365,8 +1361,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
|
||||
**kwargs: Any,
|
||||
) -> BaseMessage:
|
||||
text = get_buffer_string(messages)
|
||||
_stop = None if stop is None else list(stop)
|
||||
content = self(text, stop=_stop, **kwargs)
|
||||
stop_ = None if stop is None else list(stop)
|
||||
content = self(text, stop=stop_, **kwargs)
|
||||
return AIMessage(content=content)
|
||||
|
||||
@deprecated("0.1.7", alternative="ainvoke", removal="1.0")
|
||||
@@ -1374,8 +1370,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
|
||||
async def apredict(
|
||||
self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
|
||||
) -> str:
|
||||
_stop = None if stop is None else list(stop)
|
||||
return await self._call_async(text, stop=_stop, **kwargs)
|
||||
stop_ = None if stop is None else list(stop)
|
||||
return await self._call_async(text, stop=stop_, **kwargs)
|
||||
|
||||
@deprecated("0.1.7", alternative="ainvoke", removal="1.0")
|
||||
@override
|
||||
@@ -1387,8 +1383,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
|
||||
**kwargs: Any,
|
||||
) -> BaseMessage:
|
||||
text = get_buffer_string(messages)
|
||||
_stop = None if stop is None else list(stop)
|
||||
content = await self._call_async(text, stop=_stop, **kwargs)
|
||||
stop_ = None if stop is None else list(stop)
|
||||
content = await self._call_async(text, stop=stop_, **kwargs)
|
||||
return AIMessage(content=content)
|
||||
|
||||
def __str__(self) -> str:
|
||||
@@ -1418,9 +1414,10 @@ class BaseLLM(BaseLanguageModel[str], ABC):
|
||||
ValueError: If the file path is not a string or Path object.
|
||||
|
||||
Example:
|
||||
.. code-block:: python
|
||||
|
||||
llm.save(file_path="path/llm.yaml")
|
||||
.. code-block:: python
|
||||
|
||||
llm.save(file_path="path/llm.yaml")
|
||||
"""
|
||||
# Convert file to Path object.
|
||||
save_path = Path(file_path)
|
||||
|
||||
@@ -355,19 +355,19 @@ def to_json_not_implemented(obj: object) -> SerializedNotImplemented:
    Returns:
        SerializedNotImplemented
    """
    _id: list[str] = []
    id_: list[str] = []
    try:
        if hasattr(obj, "__name__"):
            _id = [*obj.__module__.split("."), obj.__name__]
            id_ = [*obj.__module__.split("."), obj.__name__]
        elif hasattr(obj, "__class__"):
            _id = [*obj.__class__.__module__.split("."), obj.__class__.__name__]
            id_ = [*obj.__class__.__module__.split("."), obj.__class__.__name__]
    except Exception:
        logger.debug("Failed to serialize object", exc_info=True)

    result: SerializedNotImplemented = {
        "lc": 1,
        "type": "not_implemented",
        "id": _id,
        "id": id_,
        "repr": None,
    }
    with contextlib.suppress(Exception):

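The ``id_`` list built above is simply the object's dotted module path split into segments, plus its name; a standalone sketch:

.. code-block:: python

    class Widget:
        pass

    obj = Widget()
    id_ = [*obj.__class__.__module__.split("."), obj.__class__.__name__]
    print(id_)  # ['__main__', 'Widget'] when run as a script
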
@@ -33,6 +33,15 @@ if TYPE_CHECKING:
    )
    from langchain_core.messages.chat import ChatMessage, ChatMessageChunk
    from langchain_core.messages.content_blocks import (
        Base64ContentBlock,
        ContentBlock,
        DocumentCitation,
        NonStandardAnnotation,
        NonStandardContentBlock,
        ReasoningContentBlock,
        TextContentBlock,
        ToolCallContentBlock,
        UrlCitation,
        convert_to_openai_data_block,
        convert_to_openai_image_block,
        is_data_content_block,
@@ -66,23 +75,32 @@ __all__ = (
    "AIMessage",
    "AIMessageChunk",
    "AnyMessage",
    "Base64ContentBlock",
    "BaseMessage",
    "BaseMessageChunk",
    "ChatMessage",
    "ChatMessageChunk",
    "ContentBlock",
    "DocumentCitation",
    "FunctionMessage",
    "FunctionMessageChunk",
    "HumanMessage",
    "HumanMessageChunk",
    "InvalidToolCall",
    "MessageLikeRepresentation",
    "NonStandardAnnotation",
    "NonStandardContentBlock",
    "ReasoningContentBlock",
    "RemoveMessage",
    "SystemMessage",
    "SystemMessageChunk",
    "TextContentBlock",
    "ToolCall",
    "ToolCallChunk",
    "ToolCallContentBlock",
    "ToolMessage",
    "ToolMessageChunk",
    "UrlCitation",
    "_message_from_dict",
    "convert_to_messages",
    "convert_to_openai_data_block",
@@ -103,25 +121,34 @@ __all__ = (
_dynamic_imports = {
    "AIMessage": "ai",
    "AIMessageChunk": "ai",
    "Base64ContentBlock": "content_blocks",
    "BaseMessage": "base",
    "BaseMessageChunk": "base",
    "merge_content": "base",
    "message_to_dict": "base",
    "messages_to_dict": "base",
    "ContentBlock": "content_blocks",
    "ChatMessage": "chat",
    "ChatMessageChunk": "chat",
    "DocumentCitation": "content_blocks",
    "FunctionMessage": "function",
    "FunctionMessageChunk": "function",
    "HumanMessage": "human",
    "HumanMessageChunk": "human",
    "NonStandardAnnotation": "content_blocks",
    "NonStandardContentBlock": "content_blocks",
    "ReasoningContentBlock": "content_blocks",
    "RemoveMessage": "modifier",
    "SystemMessage": "system",
    "SystemMessageChunk": "system",
    "InvalidToolCall": "tool",
    "TextContentBlock": "content_blocks",
    "ToolCall": "tool",
    "ToolCallChunk": "tool",
    "ToolCallContentBlock": "content_blocks",
    "ToolMessage": "tool",
    "ToolMessageChunk": "tool",
    "UrlCitation": "content_blocks",
    "AnyMessage": "utils",
    "MessageLikeRepresentation": "utils",
    "_message_from_dict": "utils",

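A mapping like ``_dynamic_imports`` is the usual backing table for PEP 562 lazy imports. The lookup itself is not part of this hunk, so the following is only a hypothetical sketch of how such a table is typically consumed by a module-level ``__getattr__``:

.. code-block:: python

    from importlib import import_module

    def __getattr__(name: str):
        # Hypothetical: resolve the attribute from its submodule on first access.
        submodule = _dynamic_imports.get(name)
        if submodule is None:
            raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
        return getattr(import_module(f".{submodule}", package=__name__), name)
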
@@ -7,6 +7,7 @@ from typing import TYPE_CHECKING, Any, Optional, Union, cast
from pydantic import ConfigDict, Field

from langchain_core.load.serializable import Serializable
from langchain_core.messages import ContentBlock
from langchain_core.utils import get_bolded_text
from langchain_core.utils._merge import merge_dicts, merge_lists
from langchain_core.utils.interactive_env import is_interactive_env
@@ -23,7 +24,7 @@ class BaseMessage(Serializable):
    Messages are the inputs and outputs of ChatModels.
    """

    content: Union[str, list[Union[str, dict]]]
    content: Union[str, list[Union[str, ContentBlock, dict]]]
    """The string contents of the message."""

    additional_kwargs: dict = Field(default_factory=dict)

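With the widened ``content`` annotation, a message's list content may now mix plain strings, typed ``ContentBlock`` dicts, and raw dicts. Since the content-block TypedDicts are plain dicts at runtime, existing construction code keeps working; a minimal sketch:

.. code-block:: python

    from langchain_core.messages import AIMessage

    msg = AIMessage(
        content=[
            {"type": "text", "text": "Answer with citation."},
            {"type": "reasoning", "reasoning": "Checked the source first."},
        ]
    )
    print(msg.content[0]["text"])
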
@@ -7,7 +7,94 @@ from pydantic import TypeAdapter, ValidationError
from typing_extensions import NotRequired, TypedDict


class BaseDataContentBlock(TypedDict, total=False):
# Text and annotations
class UrlCitation(TypedDict):
    """Citation from a URL."""

    type: Literal["url_citation"]

    url: str
    """Source URL."""

    title: NotRequired[str]
    """Source title."""

    cited_text: NotRequired[str]
    """Text from the source that is being cited."""

    start_index: NotRequired[int]
    """Start index of the response text for which the annotation applies."""

    end_index: NotRequired[int]
    """End index of the response text for which the annotation applies."""


class DocumentCitation(TypedDict):
    """Annotation for data from a document."""

    type: Literal["document_citation"]

    title: NotRequired[str]
    """Source title."""

    cited_text: NotRequired[str]
    """Text from the source that is being cited."""

    start_index: NotRequired[int]
    """Start index of the response text for which the annotation applies."""

    end_index: NotRequired[int]
    """End index of the response text for which the annotation applies."""


class NonStandardAnnotation(TypedDict):
    """Provider-specific annotation format."""

    type: Literal["non_standard_annotation"]
    """Type of the content block."""
    value: dict[str, Any]
    """Provider-specific annotation data."""


class TextContentBlock(TypedDict):
    """Content block for text output."""

    type: Literal["text"]
    """Type of the content block."""
    text: str
    """Block text."""
    annotations: NotRequired[
        list[Union[UrlCitation, DocumentCitation, NonStandardAnnotation]]
    ]
    """Citations and other annotations."""


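Because these are TypedDicts, a conforming block is just a dict literal. For example, a text block carrying a URL citation (using only the types defined above; the URL is illustrative):

.. code-block:: python

    text_block: TextContentBlock = {
        "type": "text",
        "text": "LangChain standardizes message content blocks.",
        "annotations": [
            {
                "type": "url_citation",
                "url": "https://python.langchain.com",
                "cited_text": "standardizes message content blocks",
            }
        ],
    }
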
# Tool calls
class ToolCallContentBlock(TypedDict):
    """Content block for tool calls.

    These are references to a :class:`~langchain_core.messages.tool.ToolCall` in the
    message's ``tool_calls`` attribute.
    """

    type: Literal["tool_call"]
    """Type of the content block."""
    id: str
    """Tool call ID."""


# Reasoning
class ReasoningContentBlock(TypedDict):
    """Content block for reasoning output."""

    type: Literal["reasoning"]
    """Type of the content block."""
    reasoning: NotRequired[str]
    """Reasoning text."""


# Multi-modal
class BaseDataContentBlock(TypedDict):
    """Base class for data content blocks."""

    mime_type: NotRequired[str]
@@ -47,7 +134,7 @@ class PlainTextContentBlock(BaseDataContentBlock):
    """Text data."""


class IDContentBlock(TypedDict):
class IDContentBlock(BaseDataContentBlock):
    """Content block for data specified by an identifier."""

    type: Literal["image", "audio", "file"]
@@ -68,6 +155,28 @@ DataContentBlock = Union[
_DataContentBlockAdapter: TypeAdapter[DataContentBlock] = TypeAdapter(DataContentBlock)


# Non-standard
class NonStandardContentBlock(TypedDict, total=False):
    """Content block for provider-specific data.

    This block contains data for which there is not yet a standard type.
    """

    type: Literal["non_standard"]
    """Type of the content block."""
    value: dict[str, Any]
    """Provider-specific data."""


ContentBlock = Union[
    TextContentBlock,
    ToolCallContentBlock,
    ReasoningContentBlock,
    DataContentBlock,
    NonStandardContentBlock,
]


def is_data_content_block(
    content_block: dict,
) -> bool:

Some files were not shown because too many files have changed in this diff.