Mirror of https://github.com/hwchase17/langchain.git (synced 2026-02-05 08:40:36 +00:00)

Compare commits: langchain-...cc/lock_te (242 commits)
Commit SHA1s (242):

d7133d760b, b9357d456e, 532e6455e9, 52e57cdc20, cecfec5efa, 50f998a138,
f345ae5a1d, 01fcdff118, 5839801897, 0c10ff6418, bb625081c8, ddc850ca72,
50f9354d31, 446a9d5647, d10fd02bb3, 4071670f56, 40d6d4c738, 42eb356a44,
40bd71caa5, 1935e4526a, 323850fae1, eadbb9077e, b0f100af7e, 5b165effcd,
e455fab5d3, b21526fe38, 9ce974247c, 16e5a12806, 71b0f78952, 575662d5f1,
ece9e31a7a, 5187817006, 761f8c3231, 0375848f6c, 9c639035c0, a1f068eb85,
4cc2f6b807, abc8bf9f1c, c25b832f51, 35ae5eab4f, 73655b0ca8, f7f52cab12,
14c561e15d, 6d6f305748, 815bfa5408, ae3551c96b, dea43436ea, 43bee469ce,
741bb1ffa1, b149cce5f8, 222578b296, e845a83099, 457b235b5d, d2a023a183,
21d6f1fc6a, f97e1825b7, 81db124351, 4ec46aeb73, 993e34fafb, ec8bab83f8,
310e643842, e70ec3b9fa, 9649222322, 539e5b6936, 2c4e0ab3bc, b93ed192bd,
c6885a0f23, f64d48d507, 6cb3ea514a, 227aac5d07, 05b9bce05b, d359b7b737,
2e4d76d772, ffa32a1802, 17f34baa88, 9a78246d29, d3be4a0c56, 3db1aa0ba6,
d7f90f233b, 38c19d2891, c284fdae89, e16e09637c, d1b32e0ecf, 079b97efde,
8e3dc1f2ea, 48fa4ca271, 5bf89628bf, 5b9394319b, bbb60e210a, d79b5813a0,
729526ff7c, b7f34749b1, dd4fc8ab8f, cc6df95e58, c8951ca124, 19f2a92609,
394d42b4ae, afd349cc95, e6633a7efb, d9631edd87, c532facbc4, 1917dd1ccd,
49eeb0f3c3, 6d39e59c2e, b808d27284, b81a4ed77e, 0e3f35effe, 0aec05bde5,
bf1750a771, 7b8b8a26e7, 60119b9ba6, 5a13ad02ca, 0c80a2c55c, dcb88b618d,
9bd956598d, 0478f544d5, ab8b4003be, c8a656c05b, 6ecc85c163, 5bff018951,
8b1f54c419, 443341a20d, 930aa6073e, 580986b260, 1ebcbf1d11, 611a3cb02a,
4f94548bb7, 71cf26095e, 4e8779b3a5, 19166a6af6, b5dfdd1ab2, c8448c9a61,
de3d8b4b5c, d0b7e2f03d, 20e48598dc, 1c03555f95, 1a873fca87, 6bc497cc0f,
5bf539f405, f2b4698b54, e2b54a5d72, 83ea571d57, 635ce60a22, 0ce2e69cc1,
851fd438cf, 092697de60, 71c074d28f, 053a1246da, 1b5ffe4107, f16456139b,
cf1fa27e27, beacedd6b3, 53d6286539, 7b45d46210, 580fc7d464, 6993bc9ad1,
dcb5aba999, f29659728c, 916768e3c1, ff12555bdc, 0c6137ec2b, bf645c83f4,
49fbcec34f, 32fcc97a90, 8b6fec89bc, ea1f9e2d5b, 17c5a1621f, e1af509966,
eb25d7472d, c982573f1e, 671e4fd114, bd367ba10c, 1f43b6062e, c178ad87b6,
12b063eb67, a401d7e52a, 9efafe3337, 03adca6c44, 6bbc12b7f7, aa4890c136,
a8f2ddee31, 6cd1aadf60, eab8484a80, 672339f3c6, 6f2acbcf2e, 8b145d5dc3,
d4f77a8c8f, 71b71768bf, 921573e2b7, d8a7eda12e, 8af0dc5fd6, 7263011b24,
1523602196, 367566b02f, 29bfbc0ea6, b8ae2de169, 263c215112, 17b799860f,
0b8837a0cc, 4f41b54bcb, ce0b1a9428, 275e3b6710, e53c10e546, 395f057243,
a9ee625f32, 544648eb71, 40be8d1d90, f034bd7933, 17a04dd598, a44e707811,
3520520a48, 09d74504e3, b2f0fbfea5, 636a35fc2d, 7b9feb60cc, 87add0809f,
868cfc4a8f, 83d006190d, 1e56c66f86, 92af7b0933, e6147ce5d2, 0d59fe9789,
ff9183fd3c, 65fbbb0249, 77d3f04e0a, 0dee089ba7, 2ec74fea44, 683da2c9e9,
0ef4ac75b7, 23ec06b481, e9e597be8e, 0ba8697286, 9aac8923a3, efc52e18e9,
2d202f9762, d4555ac924, e34f9fd6f7, 6c3901f9f9, 682f338c17, d7e016c5fc,
4b11cbeb47, b5b90b5929
.github/dependabot.yml (vendored, new file, +11 lines)

```diff
@@ -0,0 +1,11 @@
+# Please see the documentation for all configuration options:
+# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
+# and
+# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
+
+version: 2
+updates:
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "weekly"
```
.github/scripts/check_diff.py (vendored, 12 changed lines)

```diff
@@ -37,7 +37,6 @@ IGNORED_PARTNERS = [
 ]
 
 PY_312_MAX_PACKAGES = [
-    "libs/partners/voyageai",
     "libs/partners/chroma",  # https://github.com/chroma-core/chroma/issues/4382
 ]
 
@@ -120,7 +119,9 @@ def _get_configs_for_single_dir(job: str, dir_: str) -> List[Dict[str, str]]:
     if job == "test-pydantic":
         return _get_pydantic_test_configs(dir_)
 
-    if dir_ == "libs/core":
+    if job == "codspeed":
+        py_versions = ["3.12"]  # 3.13 is not yet supported
+    elif dir_ == "libs/core":
         py_versions = ["3.9", "3.10", "3.11", "3.12", "3.13"]
     # custom logic for specific directories
     elif dir_ == "libs/partners/milvus":
@@ -211,6 +212,8 @@ def _get_configs_for_multi_dirs(
         )
     elif job == "extended-tests":
         dirs = list(dirs_to_run["extended-test"])
+    elif job == "codspeed":
+        dirs = list(dirs_to_run["codspeed"])
     else:
         raise ValueError(f"Unknown job: {job}")
 
@@ -226,6 +229,7 @@ if __name__ == "__main__":
         "lint": set(),
         "test": set(),
         "extended-test": set(),
+        "codspeed": set(),
     }
     docs_edited = False
 
@@ -249,6 +253,8 @@ if __name__ == "__main__":
             dirs_to_run["extended-test"].update(LANGCHAIN_DIRS)
             dirs_to_run["lint"].add(".")
 
+        if file.startswith("libs/core"):
+            dirs_to_run["codspeed"].add(f"libs/core")
         if any(file.startswith(dir_) for dir_ in LANGCHAIN_DIRS):
             # add that dir and all dirs after in LANGCHAIN_DIRS
             # for extended testing
@@ -287,6 +293,7 @@ if __name__ == "__main__":
                 if not filename.startswith(".")
             ] != ["README.md"]:
                 dirs_to_run["test"].add(f"libs/partners/{partner_dir}")
+                dirs_to_run["codspeed"].add(f"libs/partners/{partner_dir}")
             # Skip if the directory was deleted or is just a tombstone readme
         elif file == "libs/packages.yml":
             continue
@@ -312,6 +319,7 @@ if __name__ == "__main__":
             "compile-integration-tests",
             "dependencies",
             "test-pydantic",
+            "codspeed",
         ]
     }
     map_job_to_configs["test-doc-imports"] = (
```
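Taken together, these hunks route changed paths into a new `codspeed` job set alongside the existing lint/test sets, and pin that job to Python 3.12. A minimal sketch of the version-selection logic added above (the fallback branch is an assumption, not from the diff):

```python
# Sketch of the Python-version routing added in _get_configs_for_single_dir:
# the codspeed job is pinned to 3.12 because 3.13 is not yet supported there.
def pick_py_versions(job: str, dir_: str) -> list[str]:
    if job == "codspeed":
        return ["3.12"]  # 3.13 is not yet supported
    if dir_ == "libs/core":
        return ["3.9", "3.10", "3.11", "3.12", "3.13"]
    return ["3.9", "3.13"]  # assumption: min/max pair used for other dirs
```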
.github/workflows/_integration_test.yml (vendored, 3 changed lines)

```diff
@@ -41,6 +41,8 @@ jobs:
       FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
       GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
       ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
+      ANTHROPIC_FILES_API_IMAGE_ID: ${{ secrets.ANTHROPIC_FILES_API_IMAGE_ID }}
+      ANTHROPIC_FILES_API_PDF_ID: ${{ secrets.ANTHROPIC_FILES_API_PDF_ID }}
       AZURE_OPENAI_API_VERSION: ${{ secrets.AZURE_OPENAI_API_VERSION }}
       AZURE_OPENAI_API_BASE: ${{ secrets.AZURE_OPENAI_API_BASE }}
       AZURE_OPENAI_API_KEY: ${{ secrets.AZURE_OPENAI_API_KEY }}
@@ -67,7 +69,6 @@ jobs:
       ES_CLOUD_ID: ${{ secrets.ES_CLOUD_ID }}
       ES_API_KEY: ${{ secrets.ES_API_KEY }}
       MONGODB_ATLAS_URI: ${{ secrets.MONGODB_ATLAS_URI }}
-      VOYAGE_API_KEY: ${{ secrets.VOYAGE_API_KEY }}
       COHERE_API_KEY: ${{ secrets.COHERE_API_KEY }}
       UPSTAGE_API_KEY: ${{ secrets.UPSTAGE_API_KEY }}
       XAI_API_KEY: ${{ secrets.XAI_API_KEY }}
```
.github/workflows/_release.yml (vendored, 3 changed lines)

```diff
@@ -322,7 +322,6 @@ jobs:
       ES_CLOUD_ID: ${{ secrets.ES_CLOUD_ID }}
       ES_API_KEY: ${{ secrets.ES_API_KEY }}
       MONGODB_ATLAS_URI: ${{ secrets.MONGODB_ATLAS_URI }}
-      VOYAGE_API_KEY: ${{ secrets.VOYAGE_API_KEY }}
       UPSTAGE_API_KEY: ${{ secrets.UPSTAGE_API_KEY }}
       FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
       XAI_API_KEY: ${{ secrets.XAI_API_KEY }}
@@ -345,6 +344,8 @@ jobs:
       fail-fast: false # Continue testing other partners if one fails
     env:
       ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
+      ANTHROPIC_FILES_API_IMAGE_ID: ${{ secrets.ANTHROPIC_FILES_API_IMAGE_ID }}
+      ANTHROPIC_FILES_API_PDF_ID: ${{ secrets.ANTHROPIC_FILES_API_PDF_ID }}
       OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
       AZURE_OPENAI_API_VERSION: ${{ secrets.AZURE_OPENAI_API_VERSION }}
       AZURE_OPENAI_API_BASE: ${{ secrets.AZURE_OPENAI_API_BASE }}
```
.github/workflows/check-broken-links.yml (vendored, 2 changed lines)

```diff
@@ -12,7 +12,7 @@ jobs:
     steps:
       - uses: actions/checkout@v4
       - name: Use Node.js 18.x
-        uses: actions/setup-node@v3
+        uses: actions/setup-node@v4
         with:
           node-version: 18.x
           cache: "yarn"
```
.github/workflows/check_diffs.yml (vendored, 3 changed lines)

```diff
@@ -29,7 +29,7 @@ jobs:
       with:
         python-version: '3.11'
     - id: files
-      uses: Ana06/get-changed-files@v2.2.0
+      uses: Ana06/get-changed-files@v2.3.0
     - id: set-matrix
       run: |
         python -m pip install packaging requests
@@ -152,6 +152,7 @@ jobs:
       # grep will exit non-zero if the target message isn't found,
       # and `set -e` above will cause the step to fail.
       echo "$STATUS" | grep 'nothing to commit, working tree clean'
 
   ci_success:
     name: "CI Success"
     needs: [build, lint, test, compile-integration-tests, extended-tests, test-doc-imports, test-pydantic]
```
.github/workflows/check_new_docs.yml (vendored, 2 changed lines)

```diff
@@ -24,7 +24,7 @@ jobs:
       with:
         python-version: '3.10'
     - id: files
-      uses: Ana06/get-changed-files@v2.2.0
+      uses: Ana06/get-changed-files@v2.3.0
       with:
         filter: |
           *.ipynb
```
.github/workflows/codspeed.yml (vendored, 48 changed lines)

```diff
@@ -5,40 +5,58 @@ on:
     branches:
       - master
   pull_request:
-    paths:
-      - 'libs/core/**'
+  # `workflow_dispatch` allows CodSpeed to trigger backtest
+  # performance analysis in order to generate initial data.
   workflow_dispatch:
 
+env:
+  AZURE_OPENAI_CHAT_DEPLOYMENT_NAME: foo
+  AZURE_OPENAI_LEGACY_CHAT_DEPLOYMENT_NAME: foo
+  DEEPSEEK_API_KEY: foo
+  FIREWORKS_API_KEY: foo
+
 jobs:
   codspeed:
     name: Run benchmarks
+    if: (github.event_name == 'pull_request' && contains(github.event.pull_request.labels.*.name, 'run-codspeed-benchmarks')) || github.event_name == 'workflow_dispatch' || github.event_name == 'push'
     runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        include:
+          - working-directory: libs/core
+            mode: walltime
+          - working-directory: libs/partners/openai
+          - working-directory: libs/partners/anthropic
+          - working-directory: libs/partners/deepseek
+          - working-directory: libs/partners/fireworks
+          - working-directory: libs/partners/xai
+          - working-directory: libs/partners/mistralai
+          - working-directory: libs/partners/groq
+      fail-fast: false
 
     steps:
       - uses: actions/checkout@v4
 
-      # We have to use 3.12, 3.13 is not yet supported
+      # We have to use 3.12 as 3.13 is not yet supported
       - name: Install uv
-        uses: astral-sh/setup-uv@v5
+        uses: astral-sh/setup-uv@v6
         with:
           python-version: "3.12"
 
       # Using this action is still necessary for CodSpeed to work
-      - uses: actions/setup-python@v3
+      - uses: actions/setup-python@v5
         with:
           python-version: "3.12"
 
-      - name: install deps
+      - name: Install dependencies
        run: uv sync --group test
-        working-directory: ./libs/core
+        working-directory: ${{ matrix.working-directory }}
 
-      - name: Run benchmarks
+      - name: Run benchmarks ${{ matrix.working-directory }}
        uses: CodSpeedHQ/action@v3
        with:
          token: ${{ secrets.CODSPEED_TOKEN }}
          run: |
-            cd libs/core
-            uv run --no-sync pytest ./tests/benchmarks --codspeed
-          mode: walltime
+            cd ${{ matrix.working-directory }}
+            if [ "${{ matrix.working-directory }}" = "libs/core" ]; then
+              uv run --no-sync pytest ./tests/benchmarks --codspeed
+            else
+              uv run --no-sync pytest ./tests/ --codspeed
+            fi
+          mode: ${{ matrix.mode || 'instrumentation' }}
```
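The benchmarks this workflow collects are ordinary pytest tests picked up by the pytest-codspeed plugin via the `--codspeed` flag. A hedged sketch of what such a test can look like (the function being timed is hypothetical, not from the diff):

```python
# Sketch of a CodSpeed benchmark as collected by `pytest --codspeed`.
# Assumes the pytest-codspeed plugin is installed; `build_prompt` is a
# hypothetical stand-in for whatever hot path you want measured.
import pytest


def build_prompt(n: int) -> str:
    return " ".join(str(i) for i in range(n))


@pytest.mark.benchmark  # pytest-codspeed measures tests carrying this marker
def test_build_prompt_speed() -> None:
    build_prompt(1_000)
```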
.github/workflows/scheduled_test.yml (vendored, 2 changed lines)

```diff
@@ -127,6 +127,8 @@ jobs:
     env:
       OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
       ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
+      ANTHROPIC_FILES_API_IMAGE_ID: ${{ secrets.ANTHROPIC_FILES_API_IMAGE_ID }}
+      ANTHROPIC_FILES_API_PDF_ID: ${{ secrets.ANTHROPIC_FILES_API_PDF_ID }}
       AZURE_OPENAI_API_VERSION: ${{ secrets.AZURE_OPENAI_API_VERSION }}
       AZURE_OPENAI_API_BASE: ${{ secrets.AZURE_OPENAI_API_BASE }}
       AZURE_OPENAI_API_KEY: ${{ secrets.AZURE_OPENAI_API_KEY }}
```
.pre-commit-config.yaml (6 lines removed)

```diff
@@ -103,12 +103,6 @@ repos:
       entry: make -C libs/partners/qdrant format
       files: ^libs/partners/qdrant/
       pass_filenames: false
-    - id: voyageai
-      name: format partners/voyageai
-      language: system
-      entry: make -C libs/partners/voyageai format
-      files: ^libs/partners/voyageai/
-      pass_filenames: false
     - id: root
       name: format docs, cookbook
       language: system
```
Makefile (2 changed lines)

```diff
@@ -48,7 +48,7 @@ api_docs_quick_preview:
 api_docs_clean:
 	find ./docs/api_reference -name '*_api_reference.rst' -delete
 	git clean -fdX ./docs/api_reference
-	rm docs/api_reference/index.md
+	rm -f docs/api_reference/index.md
 
 
 ## api_docs_linkcheck: Run linkchecker on the API Reference documentation.
```
README.md (2 changed lines)

```diff
@@ -66,7 +66,7 @@ reliably handle complex tasks with LangGraph, our low-level agent orchestration
 framework. LangGraph offers customizable architecture, long-term memory, and
 human-in-the-loop workflows — and is trusted in production by companies like LinkedIn,
 Uber, Klarna, and GitLab.
-- [LangGraph Platform](https://langchain-ai.github.io/langgraph/concepts/#langgraph-platform) - Deploy
+- [LangGraph Platform](https://langchain-ai.github.io/langgraph/concepts/langgraph_platform/) - Deploy
 and scale agents effortlessly with a purpose-built deployment platform for long
 running, stateful workflows. Discover, reuse, configure, and share agents across
 teams — and iterate quickly with visual prototyping in
```
SECURITY.md (4 changed lines)

```diff
@@ -7,8 +7,8 @@ LangChain has a large ecosystem of integrations with various external resources
 When building such applications developers should remember to follow good security practices:
 
 * [**Limit Permissions**](https://en.wikipedia.org/wiki/Principle_of_least_privilege): Scope permissions specifically to the application's need. Granting broad or excessive permissions can introduce significant security vulnerabilities. To avoid such vulnerabilities, consider using read-only credentials, disallowing access to sensitive resources, using sandboxing techniques (such as running inside a container), specifying proxy configurations to control external requests, etc. as appropriate for your application.
-* **Anticipate Potential Misuse**: Just as humans can err, so can Large Language Models (LLMs). Always assume that any system access or credentials may be used in any way allowed by the permissions they are assigned. For example, if a pair of database credentials allows deleting data, it’s safest to assume that any LLM able to use those credentials may in fact delete data.
-* [**Defense in Depth**](https://en.wikipedia.org/wiki/Defense_in_depth_(computing)): No security technique is perfect. Fine-tuning and good chain design can reduce, but not eliminate, the odds that a Large Language Model (LLM) may make a mistake. It’s best to combine multiple layered security approaches rather than relying on any single layer of defense to ensure security. For example: use both read-only permissions and sandboxing to ensure that LLMs are only able to access data that is explicitly meant for them to use.
+* **Anticipate Potential Misuse**: Just as humans can err, so can Large Language Models (LLMs). Always assume that any system access or credentials may be used in any way allowed by the permissions they are assigned. For example, if a pair of database credentials allows deleting data, it's safest to assume that any LLM able to use those credentials may in fact delete data.
+* [**Defense in Depth**](https://en.wikipedia.org/wiki/Defense_in_depth_(computing)): No security technique is perfect. Fine-tuning and good chain design can reduce, but not eliminate, the odds that a Large Language Model (LLM) may make a mistake. It's best to combine multiple layered security approaches rather than relying on any single layer of defense to ensure security. For example: use both read-only permissions and sandboxing to ensure that LLMs are only able to access data that is explicitly meant for them to use.
 
 Risks of not doing so include, but are not limited to:
 * Data corruption or loss.
@@ -39,7 +39,7 @@ Before reporting a vulnerability, please review:
 
 1) In-Scope Targets and Out-of-Scope Targets below.
 2) The [langchain-ai/langchain](https://python.langchain.com/docs/contributing/repo_structure) monorepo structure.
-3) The [Best practicies](#best-practices) above to
+3) The [Best practices](#best-practices) above to
 understand what we consider to be a security vulnerability vs. developer
 responsibility.
```
Notebook diff (multi-modal RAG with Vertex AI; retired Gemini model names swapped for current ones):

```diff
@@ -185,7 +185,7 @@
 " )\n",
 " # Text summary chain\n",
 " model = VertexAI(\n",
-" temperature=0, model_name=\"gemini-pro\", max_tokens=1024\n",
+" temperature=0, model_name=\"gemini-2.0-flash-lite-001\", max_tokens=1024\n",
 " ).with_fallbacks([empty_response])\n",
 " summarize_chain = {\"element\": lambda x: x} | prompt | model | StrOutputParser()\n",
 "\n",
@@ -254,7 +254,7 @@
 "\n",
 "def image_summarize(img_base64, prompt):\n",
 " \"\"\"Make image summary\"\"\"\n",
-" model = ChatVertexAI(model=\"gemini-pro-vision\", max_tokens=1024)\n",
+" model = ChatVertexAI(model=\"gemini-2.0-flash\", max_tokens=1024)\n",
 "\n",
 " msg = model.invoke(\n",
 " [\n",
@@ -394,7 +394,7 @@
 "# The vectorstore to use to index the summaries\n",
 "vectorstore = Chroma(\n",
 " collection_name=\"mm_rag_cj_blog\",\n",
-" embedding_function=VertexAIEmbeddings(model_name=\"textembedding-gecko@latest\"),\n",
+" embedding_function=VertexAIEmbeddings(model_name=\"text-embedding-005\"),\n",
 ")\n",
 "\n",
 "# Create retriever\n",
@@ -553,7 +553,7 @@
 " \"\"\"\n",
 "\n",
 " # Multi-modal LLM\n",
-" model = ChatVertexAI(temperature=0, model_name=\"gemini-pro-vision\", max_tokens=1024)\n",
+" model = ChatVertexAI(temperature=0, model_name=\"gemini-2.0-flash\", max_tokens=1024)\n",
 "\n",
 " # RAG pipeline\n",
 " chain = (\n",
```
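A minimal sketch of instantiating the updated models named in the hunks above (assumes `langchain-google-vertexai` is installed and Google Cloud credentials are configured; only the model names come from the diff):

```python
# Hedged sketch of the model swap above; the surrounding setup is assumed.
from langchain_google_vertexai import ChatVertexAI, VertexAIEmbeddings

model = ChatVertexAI(temperature=0, model_name="gemini-2.0-flash", max_tokens=1024)
embeddings = VertexAIEmbeddings(model_name="text-embedding-005")
```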
Notebook diff (vectorstore agent; migrates from `initialize_agent`/`AgentType` to LangGraph's `create_react_agent`, renames tools, and re-records outputs):

```diff
@@ -22,7 +22,19 @@
 },
 {
 "cell_type": "code",
- "execution_count": 16,
+ "execution_count": 1,
+ "id": "e8d63d14-138d-4aa5-a741-7fd3537d00aa",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "\n",
+ "os.environ[\"OPENAI_API_KEY\"] = \"\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
 "id": "2e87c10a",
 "metadata": {},
 "outputs": [],
@@ -37,7 +49,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 17,
+ "execution_count": 3,
 "id": "0b7b772b",
 "metadata": {},
 "outputs": [],
@@ -54,19 +66,10 @@
 },
 {
 "cell_type": "code",
- "execution_count": 18,
+ "execution_count": 4,
 "id": "f2675861",
 "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Running Chroma using direct local API.\n",
- "Using DuckDB in-memory for database. Data will be transient.\n"
- ]
- }
- ],
+ "outputs": [],
 "source": [
 "from langchain_community.document_loaders import TextLoader\n",
 "\n",
@@ -81,7 +84,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 4,
+ "execution_count": 5,
 "id": "bc5403d4",
 "metadata": {},
 "outputs": [],
@@ -93,17 +96,25 @@
 },
 {
 "cell_type": "code",
- "execution_count": 5,
+ "execution_count": 6,
 "id": "1431cded",
 "metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "USER_AGENT environment variable not set, consider setting it to identify your requests.\n"
+ ]
+ }
+ ],
 "source": [
 "from langchain_community.document_loaders import WebBaseLoader"
 ]
 },
 {
 "cell_type": "code",
- "execution_count": 6,
+ "execution_count": 7,
 "id": "915d3ff3",
 "metadata": {},
 "outputs": [],
@@ -113,16 +124,20 @@
 },
 {
 "cell_type": "code",
- "execution_count": 7,
+ "execution_count": 8,
 "id": "96a2edf8",
 "metadata": {},
 "outputs": [
 {
- "name": "stdout",
+ "name": "stderr",
 "output_type": "stream",
 "text": [
- "Running Chroma using direct local API.\n",
- "Using DuckDB in-memory for database. Data will be transient.\n"
+ "Created a chunk of size 2122, which is longer than the specified 1000\n",
+ "Created a chunk of size 3187, which is longer than the specified 1000\n",
+ "Created a chunk of size 1017, which is longer than the specified 1000\n",
+ "Created a chunk of size 1049, which is longer than the specified 1000\n",
+ "Created a chunk of size 1256, which is longer than the specified 1000\n",
+ "Created a chunk of size 2321, which is longer than the specified 1000\n"
 ]
 }
 ],
@@ -135,14 +150,6 @@
 ")"
 ]
 },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "71ecef90",
- "metadata": {},
- "outputs": [],
- "source": []
- },
 {
 "cell_type": "markdown",
 "id": "c0a6c031",
@@ -153,31 +160,30 @@
 },
 {
 "cell_type": "code",
- "execution_count": 43,
+ "execution_count": 9,
 "id": "eb142786",
 "metadata": {},
 "outputs": [],
 "source": [
 "# Import things that are needed generically\n",
- "from langchain.agents import AgentType, Tool, initialize_agent\n",
- "from langchain_openai import OpenAI"
+ "from langchain.agents import Tool"
 ]
 },
 {
 "cell_type": "code",
- "execution_count": 44,
+ "execution_count": 10,
 "id": "850bc4e9",
 "metadata": {},
 "outputs": [],
 "source": [
 "tools = [\n",
 " Tool(\n",
- " name=\"State of Union QA System\",\n",
+ " name=\"state_of_union_qa_system\",\n",
 " func=state_of_union.run,\n",
 " description=\"useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question.\",\n",
 " ),\n",
 " Tool(\n",
- " name=\"Ruff QA System\",\n",
+ " name=\"ruff_qa_system\",\n",
 " func=ruff.run,\n",
 " description=\"useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question.\",\n",
 " ),\n",
@@ -186,94 +192,116 @@
 },
 {
 "cell_type": "code",
- "execution_count": 45,
- "id": "fc47f230",
+ "execution_count": 11,
+ "id": "70c461d8-aaca-4f2a-9a93-bf35841cc615",
 "metadata": {},
 "outputs": [],
 "source": [
- "# Construct the agent. We will use the default agent type here.\n",
- "# See documentation for a full list of options.\n",
- "agent = initialize_agent(\n",
- " tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
- ")"
+ "from langgraph.prebuilt import create_react_agent\n",
+ "\n",
+ "agent = create_react_agent(\"openai:gpt-4.1-mini\", tools)"
 ]
 },
 {
 "cell_type": "code",
- "execution_count": 46,
- "id": "10ca2db8",
+ "execution_count": 12,
+ "id": "a6d2b911-3044-4430-a35b-75832bb45334",
 "metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
+ "================================\u001b[1m Human Message \u001b[0m=================================\n",
+ "\n",
+ "What did biden say about ketanji brown jackson in the state of the union address?\n",
+ "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
+ "Tool Calls:\n",
+ " state_of_union_qa_system (call_26QlRdsptjEJJZjFsAUjEbaH)\n",
+ " Call ID: call_26QlRdsptjEJJZjFsAUjEbaH\n",
+ " Args:\n",
+ " __arg1: What did Biden say about Ketanji Brown Jackson in the state of the union address?\n",
+ "=================================\u001b[1m Tool Message \u001b[0m=================================\n",
+ "Name: state_of_union_qa_system\n",
+ "\n",
- "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
- "\u001b[32;1m\u001b[1;3m I need to find out what Biden said about Ketanji Brown Jackson in the State of the Union address.\n",
- "Action: State of Union QA System\n",
- "Action Input: What did Biden say about Ketanji Brown Jackson in the State of the Union address?\u001b[0m\n",
- "Observation: \u001b[36;1m\u001b[1;3m Biden said that Jackson is one of the nation's top legal minds and that she will continue Justice Breyer's legacy of excellence.\u001b[0m\n",
- "Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
- "Final Answer: Biden said that Jackson is one of the nation's top legal minds and that she will continue Justice Breyer's legacy of excellence.\u001b[0m\n",
+ " Biden said that he nominated Ketanji Brown Jackson for the United States Supreme Court and praised her as one of the nation's top legal minds who will continue Justice Breyer's legacy of excellence.\n",
+ "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
 "\n",
- "\u001b[1m> Finished chain.\u001b[0m\n"
+ "In the State of the Union address, Biden said that he nominated Ketanji Brown Jackson for the United States Supreme Court and praised her as one of the nation's top legal minds who will continue Justice Breyer's legacy of excellence.\n"
 ]
 },
- {
- "data": {
- "text/plain": [
- "\"Biden said that Jackson is one of the nation's top legal minds and that she will continue Justice Breyer's legacy of excellence.\""
- ]
- },
- "execution_count": 46,
- "metadata": {},
- "output_type": "execute_result"
- }
 ],
 "source": [
- "agent.run(\n",
- " \"What did biden say about ketanji brown jackson in the state of the union address?\"\n",
- ")"
+ "input_message = {\n",
+ " \"role\": \"user\",\n",
+ " \"content\": \"What did biden say about ketanji brown jackson in the state of the union address?\",\n",
+ "}\n",
+ "\n",
+ "for step in agent.stream(\n",
+ " {\"messages\": [input_message]},\n",
+ " stream_mode=\"values\",\n",
+ "):\n",
+ " step[\"messages\"][-1].pretty_print()"
 ]
 },
 {
 "cell_type": "code",
- "execution_count": 47,
- "id": "4e91b811",
+ "execution_count": 13,
+ "id": "e836b4cd-abf7-49eb-be0e-b9ad501213f3",
 "metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
+ "================================\u001b[1m Human Message \u001b[0m=================================\n",
+ "\n",
+ "Why use ruff over flake8?\n",
+ "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
+ "Tool Calls:\n",
+ " ruff_qa_system (call_KqDoWeO9bo9OAXdxOsCb6msC)\n",
+ " Call ID: call_KqDoWeO9bo9OAXdxOsCb6msC\n",
+ " Args:\n",
+ " __arg1: Why use ruff over flake8?\n",
+ "=================================\u001b[1m Tool Message \u001b[0m=================================\n",
+ "Name: ruff_qa_system\n",
+ "\n",
 "\n",
- "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
- "\u001b[32;1m\u001b[1;3m I need to find out the advantages of using ruff over flake8\n",
- "Action: Ruff QA System\n",
- "Action Input: What are the advantages of using ruff over flake8?\u001b[0m\n",
- "Observation: \u001b[33;1m\u001b[1;3m Ruff can be used as a drop-in replacement for Flake8 when used (1) without or with a small number of plugins, (2) alongside Black, and (3) on Python 3 code. It also re-implements some of the most popular Flake8 plugins and related code quality tools natively, including isort, yesqa, eradicate, and most of the rules implemented in pyupgrade. Ruff also supports automatically fixing its own lint violations, which Flake8 does not.\u001b[0m\n",
- "Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
- "Final Answer: Ruff can be used as a drop-in replacement for Flake8 when used (1) without or with a small number of plugins, (2) alongside Black, and (3) on Python 3 code. It also re-implements some of the most popular Flake8 plugins and related code quality tools natively, including isort, yesqa, eradicate, and most of the rules implemented in pyupgrade. Ruff also supports automatically fixing its own lint violations, which Flake8 does not.\u001b[0m\n",
+ "There are a few reasons why someone might choose to use Ruff over Flake8:\n",
 "\n",
- "\u001b[1m> Finished chain.\u001b[0m\n"
+ "1. Larger rule set: Ruff implements over 800 rules, while Flake8 only implements around 200. This means that Ruff can catch more potential issues in your code.\n",
+ "\n",
+ "2. Better compatibility with other tools: Ruff is designed to work well with other tools like Black, isort, and type checkers like Mypy. This means that you can use Ruff alongside these tools to get more comprehensive feedback on your code.\n",
+ "\n",
+ "3. Automatic fixing of lint violations: Unlike Flake8, Ruff is capable of automatically fixing its own lint violations. This can save you time and effort when fixing issues in your code.\n",
+ "\n",
+ "4. Native implementation of popular Flake8 plugins: Ruff re-implements some of the most popular Flake8 plugins natively, which means you don't have to install and configure multiple plugins to get the same functionality.\n",
+ "\n",
+ "Overall, Ruff offers a more comprehensive and user-friendly experience compared to Flake8, making it a popular choice for many developers.\n",
+ "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
+ "\n",
+ "You might choose to use Ruff over Flake8 for several reasons:\n",
+ "\n",
+ "1. Ruff has a much larger rule set, implementing over 800 rules compared to Flake8's roughly 200, so it can catch more potential issues.\n",
+ "2. Ruff is designed to work better with other tools like Black, isort, and type checkers like Mypy, providing more comprehensive code feedback.\n",
+ "3. Ruff can automatically fix its own lint violations, which Flake8 cannot, saving time and effort.\n",
+ "4. Ruff natively implements some popular Flake8 plugins, so you don't need to install and configure multiple plugins separately.\n",
+ "\n",
+ "Overall, Ruff offers a more comprehensive and user-friendly experience compared to Flake8.\n"
 ]
 },
- {
- "data": {
- "text/plain": [
- "'Ruff can be used as a drop-in replacement for Flake8 when used (1) without or with a small number of plugins, (2) alongside Black, and (3) on Python 3 code. It also re-implements some of the most popular Flake8 plugins and related code quality tools natively, including isort, yesqa, eradicate, and most of the rules implemented in pyupgrade. Ruff also supports automatically fixing its own lint violations, which Flake8 does not.'"
- ]
- },
- "execution_count": 47,
- "metadata": {},
- "output_type": "execute_result"
- }
 ],
 "source": [
- "agent.run(\"Why use ruff over flake8?\")"
+ "input_message = {\n",
+ " \"role\": \"user\",\n",
+ " \"content\": \"Why use ruff over flake8?\",\n",
+ "}\n",
+ "\n",
+ "for step in agent.stream(\n",
+ " {\"messages\": [input_message]},\n",
+ " stream_mode=\"values\",\n",
+ "):\n",
+ " step[\"messages\"][-1].pretty_print()"
 ]
 },
 {
@@ -296,20 +324,20 @@
 },
 {
 "cell_type": "code",
- "execution_count": 48,
+ "execution_count": 14,
 "id": "f59b377e",
 "metadata": {},
 "outputs": [],
 "source": [
 "tools = [\n",
 " Tool(\n",
- " name=\"State of Union QA System\",\n",
+ " name=\"state_of_union_qa_system\",\n",
 " func=state_of_union.run,\n",
 " description=\"useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question.\",\n",
 " return_direct=True,\n",
 " ),\n",
 " Tool(\n",
- " name=\"Ruff QA System\",\n",
+ " name=\"ruff_qa_system\",\n",
 " func=ruff.run,\n",
 " description=\"useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question.\",\n",
 " return_direct=True,\n",
@@ -319,90 +347,92 @@
 },
 {
 "cell_type": "code",
- "execution_count": 49,
- "id": "8615707a",
+ "execution_count": 15,
+ "id": "06f69c0f-c83d-4b7f-a1c8-7614aced3bae",
 "metadata": {},
 "outputs": [],
 "source": [
- "agent = initialize_agent(\n",
- " tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
- ")"
+ "from langgraph.prebuilt import create_react_agent\n",
+ "\n",
+ "agent = create_react_agent(\"openai:gpt-4.1-mini\", tools)"
 ]
 },
 {
 "cell_type": "code",
- "execution_count": 50,
- "id": "36e718a9",
+ "execution_count": 16,
+ "id": "a6b38c12-ac25-43c0-b9c2-2b1985ab4825",
 "metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
+ "================================\u001b[1m Human Message \u001b[0m=================================\n",
+ "\n",
+ "What did biden say about ketanji brown jackson in the state of the union address?\n",
+ "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
+ "Tool Calls:\n",
+ " state_of_union_qa_system (call_yjxh11OnZiauoyTAn9npWdxj)\n",
+ " Call ID: call_yjxh11OnZiauoyTAn9npWdxj\n",
+ " Args:\n",
+ " __arg1: What did Biden say about Ketanji Brown Jackson in the state of the union address?\n",
+ "=================================\u001b[1m Tool Message \u001b[0m=================================\n",
+ "Name: state_of_union_qa_system\n",
+ "\n",
- "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
- "\u001b[32;1m\u001b[1;3m I need to find out what Biden said about Ketanji Brown Jackson in the State of the Union address.\n",
- "Action: State of Union QA System\n",
- "Action Input: What did Biden say about Ketanji Brown Jackson in the State of the Union address?\u001b[0m\n",
- "Observation: \u001b[36;1m\u001b[1;3m Biden said that Jackson is one of the nation's top legal minds and that she will continue Justice Breyer's legacy of excellence.\u001b[0m\n",
- "\u001b[32;1m\u001b[1;3m\u001b[0m\n",
 "\n",
- "\u001b[1m> Finished chain.\u001b[0m\n"
+ " Biden said that he nominated Ketanji Brown Jackson for the United States Supreme Court and praised her as one of the nation's top legal minds who will continue Justice Breyer's legacy of excellence.\n"
 ]
 },
- {
- "data": {
- "text/plain": [
- "\" Biden said that Jackson is one of the nation's top legal minds and that she will continue Justice Breyer's legacy of excellence.\""
- ]
- },
- "execution_count": 50,
- "metadata": {},
- "output_type": "execute_result"
- }
 ],
 "source": [
- "agent.run(\n",
- " \"What did biden say about ketanji brown jackson in the state of the union address?\"\n",
- ")"
+ "input_message = {\n",
+ " \"role\": \"user\",\n",
+ " \"content\": \"What did biden say about ketanji brown jackson in the state of the union address?\",\n",
+ "}\n",
+ "\n",
+ "for step in agent.stream(\n",
+ " {\"messages\": [input_message]},\n",
+ " stream_mode=\"values\",\n",
+ "):\n",
+ " step[\"messages\"][-1].pretty_print()"
 ]
 },
 {
 "cell_type": "code",
- "execution_count": 51,
- "id": "edfd0a1a",
+ "execution_count": 17,
+ "id": "88f08d86-7972-4148-8128-3ac8898ad68a",
 "metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
+ "================================\u001b[1m Human Message \u001b[0m=================================\n",
+ "\n",
+ "Why use ruff over flake8?\n",
+ "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
+ "Tool Calls:\n",
+ " ruff_qa_system (call_GiWWfwF6wbbRFQrHlHbhRtGW)\n",
+ " Call ID: call_GiWWfwF6wbbRFQrHlHbhRtGW\n",
+ " Args:\n",
+ " __arg1: What are the advantages of using ruff over flake8 for Python linting?\n",
+ "=================================\u001b[1m Tool Message \u001b[0m=================================\n",
+ "Name: ruff_qa_system\n",
 "\n",
- "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
- "\u001b[32;1m\u001b[1;3m I need to find out the advantages of using ruff over flake8\n",
- "Action: Ruff QA System\n",
- "Action Input: What are the advantages of using ruff over flake8?\u001b[0m\n",
- "Observation: \u001b[33;1m\u001b[1;3m Ruff can be used as a drop-in replacement for Flake8 when used (1) without or with a small number of plugins, (2) alongside Black, and (3) on Python 3 code. It also re-implements some of the most popular Flake8 plugins and related code quality tools natively, including isort, yesqa, eradicate, and most of the rules implemented in pyupgrade. Ruff also supports automatically fixing its own lint violations, which Flake8 does not.\u001b[0m\n",
- "\u001b[32;1m\u001b[1;3m\u001b[0m\n",
 "\n",
- "\u001b[1m> Finished chain.\u001b[0m\n"
+ " Ruff has a larger rule set, supports automatic fixing of lint violations, and does not require the installation of additional plugins. It also has better compatibility with Black and can be used alongside a type checker for more comprehensive code analysis.\n"
 ]
 },
- {
- "data": {
- "text/plain": [
- "' Ruff can be used as a drop-in replacement for Flake8 when used (1) without or with a small number of plugins, (2) alongside Black, and (3) on Python 3 code. It also re-implements some of the most popular Flake8 plugins and related code quality tools natively, including isort, yesqa, eradicate, and most of the rules implemented in pyupgrade. Ruff also supports automatically fixing its own lint violations, which Flake8 does not.'"
- ]
- },
- "execution_count": 51,
- "metadata": {},
- "output_type": "execute_result"
- }
 ],
 "source": [
- "agent.run(\"Why use ruff over flake8?\")"
+ "input_message = {\n",
+ " \"role\": \"user\",\n",
+ " \"content\": \"Why use ruff over flake8?\",\n",
+ "}\n",
+ "\n",
+ "for step in agent.stream(\n",
+ " {\"messages\": [input_message]},\n",
+ " stream_mode=\"values\",\n",
+ "):\n",
+ " step[\"messages\"][-1].pretty_print()"
 ]
 },
 {
@@ -417,19 +447,19 @@
 },
 {
 "cell_type": "code",
- "execution_count": 57,
+ "execution_count": 18,
 "id": "d397a233",
 "metadata": {},
 "outputs": [],
 "source": [
 "tools = [\n",
 " Tool(\n",
- " name=\"State of Union QA System\",\n",
+ " name=\"state_of_union_qa_system\",\n",
 " func=state_of_union.run,\n",
 " description=\"useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question, not referencing any obscure pronouns from the conversation before.\",\n",
 " ),\n",
 " Tool(\n",
- " name=\"Ruff QA System\",\n",
+ " name=\"ruff_qa_system\",\n",
 " func=ruff.run,\n",
 " description=\"useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question, not referencing any obscure pronouns from the conversation before.\",\n",
 " ),\n",
@@ -438,60 +468,60 @@
 },
 {
 "cell_type": "code",
- "execution_count": 58,
- "id": "06157240",
+ "execution_count": 19,
+ "id": "41743f29-150d-40ba-aa8e-3a63c32216aa",
 "metadata": {},
 "outputs": [],
 "source": [
- "# Construct the agent. We will use the default agent type here.\n",
- "# See documentation for a full list of options.\n",
- "agent = initialize_agent(\n",
- " tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
- ")"
+ "from langgraph.prebuilt import create_react_agent\n",
+ "\n",
+ "agent = create_react_agent(\"openai:gpt-4.1-mini\", tools)"
 ]
 },
 {
 "cell_type": "code",
- "execution_count": 59,
- "id": "b492b520",
+ "execution_count": 20,
+ "id": "e20e81dd-284a-4d07-9160-63a84b65cba8",
 "metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
+ "================================\u001b[1m Human Message \u001b[0m=================================\n",
+ "\n",
+ "What tool does ruff use to run over Jupyter Notebooks? Did the president mention that tool in the state of the union?\n",
+ "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
+ "Tool Calls:\n",
+ " ruff_qa_system (call_VOnxiOEehauQyVOTjDJkR5L2)\n",
+ " Call ID: call_VOnxiOEehauQyVOTjDJkR5L2\n",
+ " Args:\n",
+ " __arg1: What tool does ruff use to run over Jupyter Notebooks?\n",
+ " state_of_union_qa_system (call_AbSsXAxwe4JtCRhga926SxOZ)\n",
+ " Call ID: call_AbSsXAxwe4JtCRhga926SxOZ\n",
+ " Args:\n",
+ " __arg1: Did the president mention the tool that ruff uses to run over Jupyter Notebooks in the state of the union?\n",
+ "=================================\u001b[1m Tool Message \u001b[0m=================================\n",
+ "Name: state_of_union_qa_system\n",
+ "\n",
- "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
- "\u001b[32;1m\u001b[1;3m I need to find out what tool ruff uses to run over Jupyter Notebooks, and if the president mentioned it in the state of the union.\n",
- "Action: Ruff QA System\n",
- "Action Input: What tool does ruff use to run over Jupyter Notebooks?\u001b[0m\n",
- "Observation: \u001b[33;1m\u001b[1;3m Ruff is integrated into nbQA, a tool for running linters and code formatters over Jupyter Notebooks. After installing ruff and nbqa, you can run Ruff over a notebook like so: > nbqa ruff Untitled.html\u001b[0m\n",
- "Thought:\u001b[32;1m\u001b[1;3m I now need to find out if the president mentioned this tool in the state of the union.\n",
- "Action: State of Union QA System\n",
- "Action Input: Did the president mention nbQA in the state of the union?\u001b[0m\n",
- "Observation: \u001b[36;1m\u001b[1;3m No, the president did not mention nbQA in the state of the union.\u001b[0m\n",
- "Thought:\u001b[32;1m\u001b[1;3m I now know the final answer.\n",
- "Final Answer: No, the president did not mention nbQA in the state of the union.\u001b[0m\n",
+ " No, the president did not mention the tool that ruff uses to run over Jupyter Notebooks in the state of the union.\n",
+ "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
 "\n",
- "\u001b[1m> Finished chain.\u001b[0m\n"
+ "Ruff does not support source.organizeImports and source.fixAll code actions in Jupyter Notebooks. Additionally, the president did not mention the tool that ruff uses to run over Jupyter Notebooks in the state of the union.\n"
 ]
 },
- {
- "data": {
- "text/plain": [
- "'No, the president did not mention nbQA in the state of the union.'"
- ]
- },
- "execution_count": 59,
- "metadata": {},
- "output_type": "execute_result"
- }
 ],
 "source": [
- "agent.run(\n",
- " \"What tool does ruff use to run over Jupyter Notebooks? Did the president mention that tool in the state of the union?\"\n",
- ")"
+ "input_message = {\n",
+ " \"role\": \"user\",\n",
+ " \"content\": \"What tool does ruff use to run over Jupyter Notebooks? Did the president mention that tool in the state of the union?\",\n",
+ "}\n",
+ "\n",
+ "for step in agent.stream(\n",
+ " {\"messages\": [input_message]},\n",
+ " stream_mode=\"values\",\n",
+ "):\n",
+ " step[\"messages\"][-1].pretty_print()"
 ]
 },
 {
@@ -519,7 +549,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
- "version": "3.10.1"
+ "version": "3.12.4"
 }
 },
 "nbformat": 4,
```
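Pulled out of the cell JSON, the migrated pattern this notebook now uses looks like the following sketch (assumes `langgraph` and an OpenAI API key are available; `answer_question` is a hypothetical stand-in for the notebook's state_of_union/ruff RetrievalQA chains, while the model string, tool names, and stream loop come from the diff above):

```python
# Sketch of the initialize_agent -> create_react_agent migration shown above.
from langchain.agents import Tool
from langgraph.prebuilt import create_react_agent


def answer_question(query: str) -> str:
    """Hypothetical retrieval-backed QA function standing in for the chains."""
    return "..."


tools = [
    Tool(
        name="state_of_union_qa_system",
        func=answer_question,
        description="useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question.",
    ),
]

# create_react_agent accepts a model identifier string and a list of tools.
agent = create_react_agent("openai:gpt-4.1-mini", tools)

input_message = {
    "role": "user",
    "content": "What did biden say about ketanji brown jackson in the state of the union address?",
}

# stream_mode="values" yields the full message list at each step.
for step in agent.stream({"messages": [input_message]}, stream_mode="values"):
    step["messages"][-1].pretty_print()
```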
Notebook diff (retrieval walkthrough; "Lets" corrected to "Let's" in markdown cells):

```diff
@@ -53,7 +53,7 @@
 "id": "f5ccda4e-7af5-4355-b9c4-25547edf33f9",
 "metadata": {},
 "source": [
- "Lets first load up this paper, and split into text chunks of size 1000."
+ "Let's first load up this paper, and split into text chunks of size 1000."
 ]
 },
 {
@@ -241,7 +241,7 @@
 "id": "360b2837-8024-47e0-a4ba-592505a9a5c8",
 "metadata": {},
 "source": [
- "With our embedder in place, lets define our retriever:"
+ "With our embedder in place, let's define our retriever:"
 ]
 },
 {
@@ -312,7 +312,7 @@
 "id": "d84ea8f4-a5de-4d76-b44d-85e56583f489",
 "metadata": {},
 "source": [
- "Lets write our documents into our new store. This will use our embedder on each document."
+ "Let's write our documents into our new store. This will use our embedder on each document."
 ]
 },
 {
@@ -339,7 +339,7 @@
 "id": "580bc212-8ecd-4d28-8656-b96fcd0d7eb6",
 "metadata": {},
 "source": [
- "Great! Our retriever is good to go. Lets load up an LLM, that will reason over the retrieved documents:"
+ "Great! Our retriever is good to go. Let's load up an LLM, that will reason over the retrieved documents:"
 ]
 },
 {
@@ -430,7 +430,7 @@
 "id": "3bc53602-86d6-420f-91b1-fc2effa7e986",
 "metadata": {},
 "source": [
- "Excellent! lets ask it a question.\n",
+ "Excellent! Let's ask it a question.\n",
 "We will also use a verbose and debug, to check which documents were used by the model to produce the answer."
 ]
 },
```
Docs build script:

```diff
@@ -663,6 +663,7 @@ def main(dirs: Optional[list] = None) -> None:
             dir_
             for dir_ in os.listdir(ROOT_DIR / "libs")
             if dir_ not in ("cli", "partners", "packages.yml")
+            and "pyproject.toml" in os.listdir(ROOT_DIR / "libs" / dir_)
         ]
         dirs += [
             dir_
```
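The added condition keeps only `libs/` entries that are actual packages. A standalone sketch of the filter (the `ROOT_DIR` definition is an assumption about the script's location, not from the diff):

```python
# Sketch of the directory filter in the hunk above: skip non-package
# entries and anything without a pyproject.toml.
import os
from pathlib import Path

ROOT_DIR = Path(__file__).parents[1]  # assumption: script lives one level below repo root

dirs = [
    dir_
    for dir_ in os.listdir(ROOT_DIR / "libs")
    if dir_ not in ("cli", "partners", "packages.yml")
    and "pyproject.toml" in os.listdir(ROOT_DIR / "libs" / dir_)
]
```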
|
||||
eNptk3tUE2caxkEPVqytLtKDLF5GBC2USSYJgQSlSwokXBIuJmCAFRiTL5mBZCaZmQBBscUrKtZmvSHqUcolFpWArciC1kM5rq7iahUvwbWwVnrQKrS4wiIqOwH1tOd0zvvHN+edb97n+z3Pt95eACgaJwn34zjBAArVMOwLbVtvp4DZAmhmY50RMBiprUlJVqqqLRTuXIwxjImO4HJJ1MJgfI6eJPUGgJpwmqMhjVyGzAdEzWpSa+12n6mnUILJYawmEEkBHQVoLGeiv0RjwAHbwbWR4WGhiCgMEfJEQgQOM1OhpjC9CQ/DCCESZmJEIM9iFgm0uCCUh1koDmoy0a8nWmhAaUhWNMG4Br/5JQ00FGAitbBULham8sTmcEWqUWrShmNxMYjKuuR3OiJ5gXwpW4jwE0thBpkpEGSqZdH6jHjJColEJklTJhVKYbk4nkqJS0o3pKXKiuNCY/OVoEgLVpPJ+QQRHc/jh0qJzEQyLVFCZxTChQKMKBaspuWq4rTQMLW1KDUmRy8UKxUiOwZQLQt7S30aqxyW6Fm1tmaTlaVLwK9p01w+R8CWQ6LRABMDxxIaUosTetsJfTFuCoG0QGdAGVA32bZVB3OD66NJggATttnq8wEwwagBLwDHoyfRwCoWvs25iAVnwDWo6zNuEVxYWAjrSMoIWygDcM0A2qYi2MUVZo2EJ1HanEF6AzwpkCvg8HhsQS7PXSJFHARiQWthl7lcS8ObcXJA6BnMVs0PEx1lOZvYMIENdTSDMhZ6fY1rUudFuxHQNKoHXyYnvoGyw6GGpRRqBHCyaTKB9UqJIjZ5RbwsPqkuhUL1RtR2lCBhDarBQKMaViuVcApFMq9PXoXYY4tMODvRdlZBEiEQwoMSUALiicUIhCAREwXJFKomFRtJWsca8JatXYNZiHygbVbDv4X2VomdIGkC1+ka33Tf7qxxuVKnBBR7iWyttAbV6UiDqwORBBSnUqVMtvi/N+Pcb73Io0liGaTBUIpmU2thdLCoJh2lrLvqkilcjxNH1fDkwr4CsKoBVRPDBsB2VoVZQiC+GFKgVoiP8IUQTxiBiCN44a5D2iUGBlYWaGxODiaI9I8IDRX4L4OMaCRfKOYjCBKCCWC++A8aJ6JdeCcwUKSB3f6GeAjErmiGpNiVES2CWfciEXZpoRk2uwVs5LSsrIlrX8q6TbEMzk/tWrh9upvrmTJ+NvX03mPq2Z09Kzv3F8k27LH6ekmhlumk1/s/rGopd0OHH5/jVnpmtzilJa3H2urbfh7MIclBdJ69ZOHMo0l3R3rIwcS4gwWL/iHavaxzNOpJ4Ie5V4N7FnbcBWnzm9VTU8XrTlkDlo88XmRaGtszS2S5QO/0SZhW5YswibntFqJsanlX22eH6u2NP/xUUvrdtz1jmv268CNPN4deyp/q2P0X88m59J6jilIO/cXJ6UEHZPN6V14xB15pqv3oObJ1SP1E2a1TvYxZAA4+6UgcuFch5829xy96EtyycUXBO8SnAw6Z1wXk0E9pGbG5h5AXUefGn7WKD7UN2YYlo60VNcn2wbyF1ZySs0fmbm4YfzkqVtzgyD7cgt/Z85IjvaIXvX9eP+4x9t9fQ35k3knNbibHhQjxBByoWtW9MfDF1eG5X/JG+3NF2v7vzxizToGhr0dPK9obX4QUJ3XInv9PcunBzi0P/T+wzd6XF+R7K/j+Ee81Tb4R4/4D56XBy290rL3g8J4Tdc5r5bW4FOF5p+NRLH7c0hG/MwW72aXr7a99VFa71tbYBbIPhvvdviS/a+Svkgc7vBIanns7+oPmrO3709Llec/SiL/pYwLOktfSYnK52/yNOd7+nsHGFRnklCUy912+UZe5vZdmf2wdiFKWZ+0oc/y5b9n8BQdGH737bIZ6QWCDqHjAus2ndUeZ0s8zbMGc4ZP451tzd/6zb2f6fWx8361KA7f7G92mEz43j+ObAJbSl9VpvlmOg1PZJf0X1v+9rLJmhse32xNah1cNbmq/6Bwi50E9HhWDCfKAX6qT3R6ITbv95i/pSe/u2DvCedjRUqrov3NKbVH/u/uXMeLVxuU+UNMntSt/9YK8vzMkeZu2OVt6x8oey6OoDbozLbk2Q+U2X01w/oltX/Xm7k/ogeuM66Yda34pb1fd8A+/7Hfw46y4w+I0zbt5nvEZvOH+HyXuiz/zaH1vlkIxS7ouIN0vqbzIb8TZ7pnJwTLVB6ZlWtY89nhwpyTqouq6t9n/SnNMoaBWt+FgtjNXPM26N+urj/rGdmELQqaZ8/EdZfNVw90nlzYs+9fpgGvk+X3cxZVVn29vaMRX7c7TzS/nOPx4/8EPret95SU689f3Cu/kC4RJ0feup9ZWP66a8dQoLe+8t2G7rZR79fDAdagrnfKaUtFUK7+cnvO1r9PMlG413obngPbU222zstNvHQ9qOjbw/T76UWD1TGZNUrf9XlfA4r6Rh9YvvJ9vvh59ax6e0DfUURy1x7zFmt9UkTTzeQjq42juFjx99fPhT93Dcu6vL/dwc/s/IPGG9w==
|
||||
@@ -1 +1 @@
|
||||
eNqNVXtQE3ceR6FXcbyOrU7tiIYQHkplw25eJNTT4aEOUuQVHlorXXZ/SRY2u8vuBgXBO1DqMNZeFz0dRVuUkEiKIkJLEW21Ux9zIoJztRPRU9s7bUfROtKKL7hfIIieqM3kj+z+vp/H9/VLubMQ8ALFMuMaKEYEPE6I8EGQyp08KLABQVznsALRwpL2lOR0Y62Np9yhFlHkhOjISJyjlDgjWniWowglwVojC7FIKxAE3AwEey5LFrk/Wq2w4qtyRDYfMIIiGkNVmgjFSIgi+r3VCp6lgSJaYRMAr4hQECw0wYjwRZYFF2cJctEC5CxNQiPyfIZdycjBKtzK0fClSU7YGECZWN6qKH0fsrIkoCGQoHEbCRA1okUElmGAiNC4CPGKUqcF4CTM9t8+r9strCBKTc9k0IgTBOBEBDAES1KMWdprLqa4CDkJTB4WF+FhHCqR5MoHgENwmioEzasQQcQphoaJISJlBaxNlOqXJBtzFiVkLljiGCaV9uMcR1ME7oFH5kFzDd50EbGIA88euzxFQWClGFFqjRmxGZlSBPvByFGlxqBE9z8pTePQsYMbOm9/8oDDiXzIg3h7LTmGwfuejGEFqS4JJ5LTn6LEecIi1eG8Vad5KkvexngSlZxxKc/KeQ8fyznVSgyD36anmIUihpDqTDgtgKbHTXiMcalQlRpBdQiKtT7FDUS+CCFYKCHtQveNVJAGjFm0SLWYBt3DA4GDMwzWOiBMtAnldthM0HHS6R283cmJo6Pwlj0eNlY6nI6LEXKVSp6E83IorZVjWLQ6KhrVyRclGRvivDLGMRvVZORxRjDBZi0YmRsnYbEx+YB0xY05MW7FaMY81KcpKyUi3pWDjfQ8SnYNiqLusBdG8sAKK+NRtKsNBsNLeGFlgCi1ePJDUDWiUhm9WaqWja1DMZwNjufQ+npdOTyuoK+3Xxo/6m0EE/YHMM93OGssNFy1ZyzW6YfU5rw8ftSiFzPrj2CeY1G7zC0fC/5/5RsWCnlB5JOFG46WvzD6uSVzeTuPUKR0CP7OQTEsMXGxCcMXvluYnRqrT1ypM1mwlUzbKD/Lm3GGKh6abg/OHaI26NRaUp2LgFwTiWgM+ijEYFBhSK5KpSc1eixKQ+pqCylccsEll5tZ1kyDRsKEEDhhAcjwDkrO+KVLYpIS4hqykTQ2l4XDaMTh0DIsAxzpgIdrL7kImrWR8KLlgSNuIZIWs1RqMag0Wp0WJTBci2kIQo3EwgtqZBsfb5vdc0sP/eOUwZ3n4atj478L3DDBZ+jjSxsTN1xYOHlgzoKcG38zdXeG6Tu6NxRMCtpKjw8K+HD7zen16r3KNlcxCdyDsoPmQKwzoaa4pLFKtnwScy1s5u+PfjuRwfzQ3nOpdf6J+Ve/WZ8zbwv/aI1tZ7PNcX6BS//J9R2cKzJF68J1P7RV+oa2CvNw/5Y298P9qZ3b6Obe1ks5d3v2Xt9xjd58unegZNWKvL66j1tPdF7c6LdjfP3R8DtVZV9f1sxKuxzcXxUuWxqWHbKsw9dPU+57f8pbvhO3zrhs6rlbXf31rSuPel89MuODoCD9l7kdHwSPMwWGB/Rb8i8+rN485VDSje492gxtVdS4rP6Q/nvtc74KSPCfEHrsnzcrpk08GlEQW0m9Mim9IzWp68dtNa6zx8/N9jX/6p+Fzg1R9R4R9v0pM7RknaH4pxhO9kV3J1BUyqaQupaTS3fNrnBlnGz2k5/Z5vz5qF/zbjZppuV2cI2ufteByvVpiafjPw17rVvxfsobsux9N++Ursn4at7rjeeVWM23h6srNkZFhl94R//Jid3GsszJ2ytazi8rID5+Eyub/lrf4C8zfz4ZO/nP43eG375wKIq5k458/9PE2UEDcdrjaNfCRQ2lZ94RM2r96fi1FUfOtUVnbkoBgddLI7qPZ/Vt+c+p/vKJK/YcW5Md8Gvj6cKGecrgwfXbN6Z+dnvFlPkHjkR8f3/7v5aTptCKqcl9m89ejddk7c+LKj4TmnXj6r3kQ13b7GuTa4K3Ts07J0pEsP/h1MtzuYBp8xUFP+6KQSKcp/6yqIKwTz14yt2VFne3JKw+6MGXhVfvXzlm2thleHvctLIq3wTh3i8TckqKay/JHvxj9cPPhby5typSDO07a2f8nhnS7iw/ey3Wvannv1Vp1xY/ADdkyzuEpsO3Kw8cvPhbe2dRX3Bv9SPpzsD1DwfalOuu9Eyf9iDv4t9vyQIHX/HxGRz09Un76+Ldp/18fP4Hx/kGeA==
|
||||
eNqNVQtwE9cVNTZQkpSWYBIIhHijODYGP0urH5JpCcYYGhx/RhYYSMF52n2SNl7tLrsrxx+gsQIxnRLjrSH1J5gBy1JQ8G/shIJjhsC0SQmf4iHFCp/gDi20Ic7Q4rq006pPtmxDMZAdaUZ67557z73n3rsefxESJYbnJhxkOBmJkJLxH0nx+EW00Y0keavPhWQnT3tzc/KsjW6RCb7olGVBSlWrocCkQE52irzAUCkU71IXkWoXkiToQJLXxtMlwR1lKhcsLpD5QsRJqlRSo9Unq0ZMVKmvlqlEnkWqVJVbQqIqWUXxmAQn44N8J5QTJUJ2IoJnaUyEKOT4NzgCFUOXwOJDO0G5OcTYedGl2rwee+VpxGIgxUI3jYAOGIDEcxySAQtljFdt9jsRpHG2V6Ke9Dp5SVba78ugFVIUEmSAOIqnGc6hdDhKGSGZoJE97CWZKJVkOkCF/Q4VSgkUIiQAyDJFqKMYSDJkOBanB2TGhXi3rBzIzrEWrHh5dUa2b9i10gYFgWUoGIarX8cUD0aSBnKJgO6/DoRLA3C9OFk5lDZCVp1bglXhCE2KQZtCtt0dmoWYt08Yuu+6+0KAVCH2AyKKK75hcMvdNrykNGVBKifvHpdQpJxKExRdRv09WYpuLpyo4k/PvT9c5HI0nF+XQpL4036PZ6mEo5QmO2Ql1D4qxSgmoNVodUBjBBry0D2+kSyWAIrHIZR9mpaRCrKIc8hOpZHUa94XkSTgTkZv+TBMdkseL5YUnfrMH2m//TmZYw3xjHcZllfptjrdyYTWTGTBEgKHNhCkIVVjTiX1xIos68H0SBjruEK1W0XISXYsVsZI9/gpp5srRHQgfdyOCSaMZSzi+CzjYmTAcIIbN8PQyIChI8Wn14Sf4PxH2ovIhWsUjj2CSfgOGAnJSmc4X6AxAK3ZGsmaXBdMHA+NG/s+ik2moWgLHm0/RjGCSfwumAdQ1K8LqsaDR7bXCDuv/oGlGLUc4+XVmc3mR/h9WNGI8ZD/J+hw6vEPsbxbymFr4qHWD+QTiJAGDK18jH8XaEgyPZfSivblNmdxxjqLNr8YMktz4WFJFhkKuww3ssCLMpAQhVe+XKIEk/EeD2+hH+tIg86I2SwiGI5i8arNc9uW8WGq0iJCEBHLQ/rwGFFedECOKR0akzCBYLzObNQZaJ0NIJudBnqzaSEwm7UksGm1JlpvIhfqaWNjEQOVAN4WhIPnHSxqpeyAgpQTgeFhVvzL1manZb2cfnANsPA2HgtihVg4jueQLw+JeH8oAYrl3TTe2yLypS8HlrS1SqdZv9CIzAaSok0a0kzpQEa+pW1krEfH1hte+kMvsHJfuCSc4zfR9XG/mBI19MTgbyjE5mXycxZO+++CmIbpr6HPxB3RR2acbtzYnyQ6H4fLKju3/lpXX7vvWnNfaENV9s3J3zAT/lbfN/hlRVVe79u/q+waGLj86SExtOVn/wSXL/12MK7/yn+O3hEH35jlUesMM2r7lOPTrCsXLelgK2/CKdXx8+Ec2yd1TnfHkfW1mupW49en678esMe+PW3bnt4T/zhbEZe5p/9icYg9cV1ZslIlmdp/OuvGc/7DVeXPEqt9m6btubo/qIt7fl/sTbLh2KTcJtFTf7p0Kv/V5qhrE2daPLvAydpd0+nJcY9VUQlF24O/+ujOD5sTi1fF73+pvG72n1Ydn7Q4aUvZ5kl7V2ijl4LD7WV//qKgRpLN889Qla73zn3Q88Rba/++te7z6C/mfZI2Q7P8wKVV+lm+JMmYf3tTQs0HZ2x0zLyMc+dfW2xfPHvXJpWQYdzRsK35rGNxbJczoetfJwvubOuPrvz3Uu73lz49teuY+EJI/eXkjl+unfltdML3Kta03frm+2kH3rljCUwVnpGe7VN9GD/vhneJI8lROpubnNNz7dv6hhvCPtm7+x1Tnif/LHl0y5lzT0t1LVezU+SEuVdje3svdJjKd95wNU+nY6p/Tpw7inZWd94yrUwzHOs+35B1+8rUOUfMx1f8Rauv/InlWs0L717fQL9+pfOCnRk4teZk9Mrr7ETLU/MTe1bffjf90GBmhdqy94817z9V1X/d0/2j9Zv8PTHFSS3zdojlN7+amynk1tZ9FFpwobXvVuvzNZ9/nLc09gcXl7xXtGH3icEnGvndJkuvCNCUD0P7L1ZUlbQclcxPvtq7IaTdun2jd731+B/qL599c2Bn2YuvhLqr//pSc1f3XI/SIHn3OHrapl4mzs94Li40IdyBMVF7E2duz58YFfU/s88KiA==
|
||||
@@ -1 +1 @@
|
||||
eNp9VAtYFNcVJvgkUWpiUhOtMB2iBmT2xS77sBAR4wNEDA8RjeIwc5cdmJ0ZZ2aBRfFDIlJDXsMjaIyKsjxCQLAqQaMCYmjRlBTxtfWBxkdMUFFpaqOt9M4uqG1s9/tmv3vvOd855//P+U9uVTrgBYplnqulGBHwOCHCiyDlVvFglQ0I4vpKKxAtLOlYFB0bV27jKWewRRQ5waRU4hylMFM8yGD5NEGBU0qKMQMeMARQpquVhAUXlQRr5WjgCulIZkm7s301amVJQKMmFCcI1saIgvJxDKXLJChpGrfiWHoQp8b0qmSMYgSRtxEiGojCA8CtqMmM0wIIRK1AEPAUIKCmZatRnqUBDGsTAA89CRbCYUT4kAALmSYgogUgLE1CSEgaw2YwCMjE5eIQ1owQNgZQZpa3otnLA1EGNakDURFYOciHaONhVJVCJSdnOdTE2Gg6u8oCcBISl++wsIIoNfw3FZUQHOBEqTxAGVDvPmOQGJakmBSpLiWL4gIREphpXAQ1sFIGuGiXatIA4DCcptJBjYwDg+AYUdrD2WELGEwmPlOpUmgMCvWuQYAYDZgU0SKVq42G2qE30c4BuSiOpghcjqxMFVimmgcCBzsB3q0URAhMyHXAisA3f6oa5HFndOQQsIserzlmw+qkQ7G4GIhoNEgUziMalUaHqNWmIL1JE4TMjYqrDR/MGPfMjLvjeJwR4FBgbw2BryIsNiYNkDXhz4RdGT4Hi8Ht0l6jRqsL1unNQYTBrCVVAJsVHVsPjeE4YQFYrKt+qWp24sKwqPnhX4VBigUBk6uBY4CF0TSbgUXzVArFSDsCDj5uDWalSJIGGTgPsMHBl3ama7584sDxcGQhg2wagDLYqdEdeGIT5IGBIDCWc6vEeQGOM56Z5PZGTZAgrSEQ+c/ZQeThgW8sl5Qm3wbPnMuihzcrxQzeXDY7BymkXS9q14vZJUWGsCdxgMFp0f7Yl4MdlfX2CwMPOKg6ucinTO5oVopn5fYniTifAqBAEHminzbQvCuOOtuJPIUd8JAwmR6ZaZkEkbICqUKjCNYa6mLd1jjKKvfYOV1kRZyeQdr4EI3RYFCoZkDdESFonPyMxAyOIQLdAXooEyojnbVjNs6tblcqigDuBA5NsNZ4IBODbAIahocD7/ofXE+wRcEqJ/pLu7slGNSP3AhASo5glUrV/D8d3X2XKmQvVePTbqyMzHWUdjLs4adNPLDiFCOT8aQcndE59dk+zy7J6fd/vYfq0hmNel1t5lAijCIl5+u6YAMgVUYDptYl6zAjIPSYwUDimC45WaNJJqGAiGDHYpy3S/Vh7iU0pMNKd8ekGoJmbSRcRDxoGNLyY6065EVVBacHE9IJaY8lKAQ1abVB6AzEiocYgrUqlWupr4PrhIf+Xw/L8C0Y7eH6DYPfwMD7Jd8yk+aOaU/K2xSpmL7sE+xcV53C8+To3JHVRT416KXORUsX72m79XGaU7fm2BfXWt9L/Nfu4qaZ7JHbYzr6/ti3LMTozVyvLD3zcjtbPzelO8uUFfkoVcyezGnrejsc3t3HP28WS4oeKO9dN3fqLdNW/FVRuPv1BON2rw9yt2xdvnyBQtmpVHQGnIkou2H2PlQaunmbwtC99F5EdH5C1kS/RsOBN/s65yRcyTo6H7t/MaJ1ztmiA07f6A9jy0bpJyXsmhL/TuGWkP2BUoQ1fnGb9rxEFh/LyRi77ujM1IaIPyyc+asllSMSc/rG353Fv9D7TTvlVZtzyS80F5/csVKqGJE2WmWvJO8s64kxL7jk9ImUovcnxbd4nR671P9jcuxbcQvLcz7ittztG9+UtiRDl/hleZl3xfe3b587Q/ccNrff2rx/WIcqo8KwZs6UpqgfgzJG3Dsfn+PvMWm4Nqa6q75luCpyu+dn0e1Xun52Zo7Rv9n8BfK3/fOmbw01qzfSxV2jTccCd1gM42zfSgOtJ66T267/MCGxO+RaatPF9yYGNpSsyH/u9MtXboyqWW8M9Wm4tvVYj/+GrqibsWWvXibM1oJiW9thI9JTMqHl3YufetY6P90x60i6ZdHavmkrXrH2xUTqzO8U8sa72/b5543gff1vtixQ6Bc09p4NjrlTWDwMPJ+XHIVQPdXVl7/bVjcr+5ThYR59v2defUX9WN3aE5Ul8zZdCq8/OWHVnd62WXz1pe6H6/qjmo0PBupSv2bGBpyc7XX6N2+EemZFb30758GLJ/UvqXuPez7/66ZzE+lRj2wvRBXkTZ3btOJ8QRnWvmTyFLCp6qq3scRPOnXsaqTUsLugtLGkpd2Rf/Dwub0RD/3+6efTU7eyqGDdd3WH3l4S2Tiy8OCigArPC7HFu7DXHvXdUAZt8PETE3/3ObMrLX9+WKS3KreKzA79y505v72T3Hr4o8Xj0j8zXSZz2o6uLTpR+P2F0Pv7e+jVtz/8aWsL/mNx43r9uFHdASsr/WLCtSWlGyfcXGV5tDT0p305PvazEwNeLF7tPfz+qw9PfHXz96UflIDNB9uFaPWt5mLfeRXj2z65ezQkgWnduO/nQq/4wi1G3Q/n+7eX9Gb43lNEnnNOKHplim1Px15+6oZVA1fXmPx7/n6h+c/H+ztO9fc3u9Q1zOMN8z+2Hxnp4fFv1O8fSg==
|
||||
eNp9VQtUE1caFnWFXa2i9YVonY09IpLJOyEJsCziCxFkNQUfRRySGzIwmRlnJoEoqGDVrYoyVay24oPnlqWC8qgiKr4ttWrdKgYV8aACuroqbn227p0ElLO6m3PIuff+H/d+//f/35+sEjtgWJwiPcpwkgMMZuTghuWzShiw2AZY7rNiK+AslKkwZtYcQ4GNwZ0aC8fRrF4qxWhcYsYZkEoxKawEw6U4aQYMII1AapdLjRaMkxopK00A15WFiZTJ4axaKrJSJkCI9CLMaKRsJMdK39whdYVYKUFgVgy1K2k5GihLRHGS5RibkROJRXABMKtIb8YIFohFVsCyWBJgRfoFS0UMRQB4rY0FDEQaKZgOycGDOEjEj0U4C0AowgRTQlJIKpVEQBomkEMoM2K0kQA3U4xVlBEvFpEivVx4iqJFetJGEBklFoCZoExrCi0Uy/EV/514MUwF0BxfMFE6sdy9RqEMlAknk/jKpCU4LUZMwExgHBAjS1jOVArZkcAlNV+aAgCNYgRuB6UCdxQmRHJ8Je2AspOoIHaaVCZRaCXyPV1JoQQgkzgLXyDXysq6zzgHDQRqNIEbMeFmaTJLkX9jAEtD9cHKYpbDOBubVQh5gbNnSrq0y58V2Z1ec69RhZMhR/6QwWITIwodEoU5EIVMoUbkar1Mp1epkGlRhrLwrhcN731xr4HBSBY2AjqlW4ISo8VGpgBTafh70y4On4rOxhx8lU4VqAE6s06hU5lUSqBBp8TNLofBcMxoAegcF3++ZPK86LCoiPCDYVBolkUFNrD0aBhBUKnoLAZPwkl+98S6NwVCrbjJRIBUjAFoV7Pz+XbFd28BNAPbFCpIpQDY+vkKde3bGCs0CUwCpWi3M5zXYQtjaQlutEgPBVJpxYiIA1Ya+oezMbALEblEJpxRdEIK3HWvaWEtCYQ7K0527VwxBw0lJFwn7v80u+xHGh0JNCAxgnO8wdKwooLH3gkwgIZOE0j2CLlvs+IMJZQ/gcOYJABNgQh93TNAMK575BlOpEfugIGCCfIISgsicLgV8EVKiVwpd378DlCIQhVRGGC75OSLZBKlSvvtHDfCgFuFhnAGcBSHEUEmGxOiCdQpJLIgaExjiMggHCOzu3oWgXAgOpQGzWSnHKiNdtvf9RxuBG42hUq5UlubhkLpAQGvh+5wfXfNL1hPjcwpejfurh8KzSZUDZj4Qo1MJjvyP4HuJuGLBJSspieMEjJzLfl8kjrcM8QAK4aTgnJv6ah1zvHvx7yfknPc/0V381LrdIHqsrTuh1DcxDs/BiBQBxRmGapUJZpQlVqtRBONMhWKYWalViXXBJp1gYWxGOPgy8Pcc6vbtMXuivGlRoKymeDsYkBFt/HfGLtQmG0lsNVQ1m7kKy3KEBEcEkpREGLFQrQalUzmmvqZcPYwEH+yT8vYdV69XJ8+8O/16/W5V0nfQO/VCaD+65r6Bh79u51rHNpvyFA4e75aV+o19VbZFvWXlXnbcsv6HW6w3Zg4KDZnS/r0AVVxB1uwO22hN3J2fjqAvPnXczsuRgTlnY2Owxj9zvPjA0JD8pCays5/tI4931SStGdx3YtNdQdWdY5p4tuWTTfxlSnbR0wqaBl+OqmkrdP56J+vCheHeM4gb15Y+lFlxybNH6zzJP+OyLvo549t3PQjod2xPSpu09ZLpiexve2jQ75Z8Oq370+Obm9Ai7PvRQxrvDV853Y6f8gCdSm2Z6nW97boYfy0ev+cUZ+am827AsTJmQseGouv7HviPaxs3Nza1IQ/rct4Lh58lM49w3WMumL/KdX7gldsu/ToyJi0sx53Kjs4Rei26JvO8yMe/vq7GU1Hp4yP/qm2HUM+H71Mm5U65v6iyEne+Uuac2JWbL35rONJXXrki6iZv2/8pClvQ79mZuOBV4sGrpiU4+g/ZuMMjwt3z8XNvXa6r3Ra0V7p4F8OKoI3JosPfnHL2R40dHbQ/DtAcjl9afNXHmHfzxh67zOv9GPX79fZLBeGNM0dEft6/6qwK2n7ci3y8qcLW49ebvSvPzkh2NscI11cUdRn+NzaG4m/PR6U9eDFML/42nHPLpw0DMQrxq20z8Q9M9NH1ybOnzNp4hiN4cCOylMxqHr1J7za6/lfNhetvzbsxaLdtuA9QQGbxpBesZN9SgvQ/ovajRuSs4YHGrgZVYM/HFAwoKzw0pG2uqIPIoY9CR3p2/Do9sDqQZuP3Zo3it21uSV78v2x+4oTjmuCtkX7Vbb2Iai1rf6jIjw6t1SN6Lfg53JZgL/85XRnS+NeP3/TM8vB/EeH1n8ZNa8aLcqe7xGQ4e8YQntK+76u3U2FPvU+vLPqni634UHHOp+FCdXi3ZlPj1zL/+WMIzK47315ke+p1folOyv2IVO+lS08VXe59Lh+be/PHzfp0TmTFuf5Tnjmczt/RXJAs3N5eHy1393vTtDFvs3fPFhftPWl4u5x/UfeN15m+iSGVmz+2jNGXNN+f9GBppH2NfszNZvT2iM/2H8o+8NOK1j4xeP61Rknkq/2fxmN/ZAYj6/Zt39+5/QKzzifY/NlhoWrQjoOJ9nqy3atmjezxf/qBPhLqX2R4CnNOG7dG3iiJnvZzOrld6vn/nhmqsLLOTjYkn36wdaMU6e3TsBr5DFNmTlttg3LR1weSvzgGfD0euyxJ2X28t5Pi24g9Pm2l/EP18a09L604Zqtteniv65P+6PhHHnz5zS6cbmKDK5TrKRDr0xrjWy4O9yv85SPb/FV3vm8LDh4TclDMn3ZsoxfpS4b9+mVbcz9c4Jnr17/AcjtYQ0=
|
||||
@@ -1 +1 @@
|
||||
eNptVH9UU/cVB2ldtXr0oJ7JsNvrm2tXzUveS0JIQuMaAyJKCCT8Si2NX977Jnnh/fK9l0hAt5byj7Mre6VVNmq3ISTCoIDpKa0/urnp6nHqpjtsoMfVM+vpaWVz1M5Wz5z7BsEDR3Ny8l6+997P9977+dzbkoxBWWFFIbOfFVQoA1pFfxStJSnDbVGoqK0JHqphkeku9/gq90dldtwQVlVJsRsMQGL1IVncpqdF3iBKUACsIUYZ6DBQDehI4uAUVne9yMTHe5txHioKCEEFt29pxmWRg7gdjypQxnU4LaLLBRUd1KDopxVMDUNM5BiUANYgiNsFDDaCNCImBjE6KkA2KMo8vrNOh/MiAzkUyHGAB4RJTxHWeoIVFBUgPB0u4HZKhyuqKOF2Icpx6XcZAh63BwGnQB2uQl5CdatRGeVD6vN3JsMQMKgpbd1hUVG1/tllDgKahpJKQIEWGVYIaQOhJlbSYQwMckCFfagOAU61UOtrgFAiAMfGYOJelDYEJIljaZC2GyKKKPRP102ocQk+aO5Ld4dALRNUbZADQgi1lhUMpB5VOdRIoBJZgUNNJdI2LSHFEVHC4dkGCdANKJ6YJllLkHrKqiffme0jKlqPG9Ae3xxIINNhrQfIvMWcmn0uRwWV5aGWdJU/eN208f51SZQohb7Dc5CVuEBrPVPtH5kTDVU5TtAiAtF+Rb4z0xsOCiE1rO2nLLYDMlQkJCn4SgKFqVGlpRvxAE+fTE5rq8uzeYa+v2cs7y5EnGhHfUDVYUYj5gYyZiSNeRhF2U35diOJFbsr+13T11Q+lILhShkIShDRUDRDeZIOR4UGyPS5Hkr2gAvQYUikYZHGtfF8SWZjKA8dxoPGNJkOUocJIqpblOHUG50OQOaooqIexBAKg/y7q4Ec1xIemQ2xwkAtUYwkSHhhKH1df1QhaJS0DDiCOlRLeJE/x/KsSpRO/XrvzS4iljKbSfL9Bz0qxQaIxrzbQpLkh7PNXsgjPlCdc0BMNtuRh3vNAOXZrOZDc10UOCuTLovy/oPm6egeykLySn/tjDtRwmgfogUUIKkIUrUN5FuDFKXQFpjHK8Z4qFECg3TwXuOIe0rQkoX+Mqe7xNXnQ7guUWxgoXYhMzsQoIOBet4hlLuiZb6SBn6jxIkRz3ampMRTWcSDyCZ9gxzycm5LYalfVAPmjVaCyjcbLWYTah1B6dHEoJ1Sv542hyqbmCbB+7zkDQkRv7/aU1WpxvPM9fmWeFNFVeFml7VkU4SK0OVx/+ZoSFof3WYLV7sjtRGjv6bCWVYuFDqDbhDx+9RNwRoGePOM9Sos9tc6TUpjrJQ2eUtQlfXOAkwCathhKEA7T2KR4h3TAiaQgIm0fI12cka+BRgjpqlw3F9RBdhGtJ89AhcvwHyQRmsNPQEPfawKHWWiABM+KKMB1fpoTowyaG/JMOHaQHidfu1dm9GcZ8nLB1ar1WJmKEisR2thZkLuT0B3euklAacSSozWUmGTA7ebzSa8AEncgQJJcmrnv4yGVEb+J+ZVfWf3YxlTnyym3dPWSS49cf3O8v02V+vLP/3t8FFVfPP2p+cHujKXHM+ll13ksge+uLQ21cnVXLWe7rx1NDn5+Jpdi2PNCy9ZP1PbxdTiq/EL1dHOPV1vsnUDIyzRuPb2i8vxAN3y3s5/6I8sqNn3xNNnnP5XFsRee2bo+Abu1JE1uw+ezl41wPxkccT93Z4Tm7ovWqr+8A3jk5Gb1dGJ4Vt3Wk5tXEcvK5pwLH71zOP2X+fkrr4+Wke7ty7L+eFETv+eiaxVrzpyLpuKsCGHmgKjW/Jbvlh6c9EPLu14KfBJ/NElmWWfONd8PB6K6Zw5S6MvZuYseH1ec7i34Lmh586e3bBeC+Xs+N/rL721z/h742Rn6lhkxVjF1o6xZ9a2W39cmhx75Ot1bRPZ1n9WjbStpBaFvCXfbJTUF5zmf5e+vfcXqwPHxs8dOPRoT2hhir47mHulIJno+KDtMH7yYLGW6Q52sJdXPPvz4hujb39tm3f8208lR39UcqutYt9bC889YZv/+e9Knw/8cnusdesS7r/zVhwYXbXueDEYP9cr7crae0CsONVRP3z24Lsn9yyqaXSXXwTye/8Z6/Bfe6QZvvHx9VXtI/NzvzxW1nin/fKOw61XTpuut3wrp5P73ucvMF3fL2xpT11ZHhtsHXvj2lfntyh7rsarHtM+uvJB3UQddvDMR7f5op/VTF5cvZLD7uhrP/3q/NGR/htNqd7BBbd3L3n2X01wfqzd98fem3957a9VAcr3FHnshiDdXLmT+NOfJ7ft3ZX8LDtygWt+ckfNZNOXt6DV0/q3a8nfIP3cvZuVkTt21zCUlZHxfzq9j+4=
|
||||
eNptVHlwE9cZN/EfOEMoDZMMmSRutusATUYrre7DqFP5kOI4wsaSje00MU+7T9Jae3l3ddlOUoiThnFSWIcwCRmmAWSpMXawMTnKYcpRB6c4hbgtGFzKUPckTU0urgb3ydiMPaDZ0b593/He9/v9vm9dJgYlmRH4ed0Mr0AJUAr6kNV1GQk2RaGstKU5qIQFOlVZ4fPviErMqC6sKKLs0OmAyGhDktCkpQROJ4iQB4wuptdRYaDo0JbIwqlcqYBAJ0ffbcE5KMsgBGXc8XQLLgksxB14VIYSrsEpAR3OK2hjNYpeLmNKGGICS6MLYBFeiPMYTIBsRkwIYlSUh0xQkDj8uWc0OCfQkEWBLAs4QBi1esIWIBheVgDKp8F53KHX4LIiiLiDj7Jsdi1BwOGOIGBlqMEVyImobiUqofuQWutzmTAENAJlQyosyIraPbvMXYCioKgQkKcEmuFDan+omRE1GA2DLFCgBmuWFboLVcPDKSDVrgiEIgFYJgbTN2PVXiCKLEOBrF3XKAt893T1hJIU4e3mrixGBAKOV9RdLOBDCGCG15Fao9bQmyBQoQzPImiJrE1Ni0lEF79vtkEEVATFE9NUq2lSq7dpyfdm+wiy2ukFVIVvTkogUWG1E0icxdQ/e1+K8grDQTVTXHn7cdPGW8dlECl69PTNySwneUrtnCLhwznRUJGSBCWgJOo28r0ZbFjIh5SwukNvsf9KgrKIhAVfTKMwJSqvSyE24PFjmWmFba8onyHxXM59qRLEjHrAH45qMIMd84IkZiANZkxvdpB2h9GKebz+7uLpY/x3pKDPLwFeDiIaSmeIz1DhKB+BdFfxHcnuKQZUGBLZtEjp6qhVlJjYlEI4kMiS6SQ1GC+gugUJTq2obAAyR2UFYRBDWWjkn4oBKammKyQmxPA9CSIrRGQOZY/rjsoEhS4tAZbQ700QSMSQZTgGgTX1P93BiFi9yUSSH93uoQgRiJo9ZSFJcmC2WYIc4gPVOSeJ0W7ff2evmURmu820d66LDGfdZLtF/uh283R0p95CcnJ3YsadYGh1AK0bSH1jHNphwhyDChcwcHSTVeaMQXtTZBcVvAkccVMJaqakbqXLW1bc5UN5iwUhwkD1zLzFDQ1UsCHAOV2gtMiXqDSVB+ptq2KltYG4PS4Yy40NJS7SZ/a6qzi32ShwlgZmFaG3mmxmo95otRJ6LeoYNFmqGynS615dQ1X7npICghgyS9Eaj5cqC4Y8T9FRUBtYGanR2i0lsMTlCnuqkrStaRUng6Qn3FzTZE3QriK3X4wa7I2Uka83hJQgmeDjItnskb1UdX0iXC4oETdpp+tNhZgIlLBTV4gmn8ggnJzTAiaQgImb8jXOyLcQo4UsFc5bg6oQewJN6QqeTRZiPkih4YbegIM+RoHOlQIP0z4ooQZVuyhWiNJoekkwXewmqlx16h67yWqB9qDJYLcZrMCmJ0pXV/XOdMitDkhlR18GsAohxyi1P2x04g6TyYgXIok7bRakuKnJvxY1qYT8f3tX5JH2vJypX26bz9t+lvz+gcur75HLFsR6Pz9d1jq2+wR8TKOUL+p8oO7Bs2eOdRQMva0pv9HS91Xf1/78n18OnmsxNnuW9pWcvIRdc318cc9GfKjk+OJlf72fq9vPfLlM+6fSwa9SQ1WDZNHTBzfmLblQdfr5E/dtMj+wTTyaW6YZbX5/yLi559S939Z0DC5+/+/ODf1L263R7lcXjbWtH6lbeoy6ej3/DVU5aljfu/y/yfLagR39W3pLvjiwt6n7bnrZwt/nr/1jR+vPtvbNz1XGX8d6F75ycmNj90VXMi//0IXMQ/GJ4xEvuHfeqXPP1v7vriul9YPxZlPHnpeXZ1oP1h6WMyvEJf4jpW9uGRpaKoY8BT8sf/3RT2s7fvPnUy3n3YfXtmz8ZMH1J/nDL/3abi1S78b8ibfHvQH8yspz37NMPNt0ZYXtS7pIc/WhFbaByd1twrc/2TWOV4IvCn/X6h/Ydy3n4ppL4z+6ceiZyGuLJ9bsFxvfjBmGk/70Sy0jvuF8YWinfDK+IC//l9/lRfZ39J9esy2d/GSScm5Yf/6dovljOVffuSab/nPP1u1vVdVdeKyIO3to9AzFuHsK2E2rBnY+3CQs3PzPF/Mnvl5++NLeq989UVDt+qYN7qbPRgdr2H1ln+peE8UPHim6//NfvPqx9MYLm7SpR8uspVvokeGfLvrm5ZBm8sPnt24K/WMJ6F17cVGROvaklT30QbKGzrRPdMWOxj/7wXEa/PhG+998bTeG5/97p/utzWPn/9B6ZLjyUnX9+J76oYcLVtT09F9f75ys8Fx/HOjmn/lM/0LniVOM7nLkL6Hh2MEjI/8aaUCqmpzMzXn8WGfuUG5Ozv8BEYit3Q==
|
||||
@@ -1 +1 @@
|
||||
eNp9VHtUE1cehkVWpBVpaw/KcXXMainIJJkk5AFElkdQkPBKkAXL4mRykwxMZoaZCRCUPqxsrVbboT7KEe0irxJdX+AKWLuuBXzVtrhndWmpK+qixdJDtWytWtkbBM7uanf+Sebe3+v75vt+65pLAceTDO27j6QFwOGEAF94cV0zB0pcgBfWNzmB4GCsDZkZJnO9iyP7IhyCwPLRMhnOklInyQscTklxUlaKyQgHLsgIxslSYLxMg4Wxuvvq1kicgOdxO+Al0avWSDiGApJoiYsHnCRSQjCwLy3Ag1yYHcYjggMgDGWFvZFimimjEVCOeysijA0hXDQgbQznlFQWREqcjBVQMHFiCJR34hSFUrgAc2FlAThZiEhwcbCdXKqBJwxbyEqisUgJTAC4UxJtwykeVDY7AG6FPLzR4GB4Qdz/38gO4AQBWAEFNMFYSdou/tFeQbKRiBXYvL08EAENxnkTPcUAsChOkaXA48WHQtC0ILaybsghjXqZK5fJpQqtFNs3ARwV3CwQD+IsS5EE7q0iK+IZuulR08cv9k/mUYC2Cw6xHlNjH3CAZyHf4PUmXoCA+XUNcFTwyenmCeL3ZKyYxHjZJ6QhCY4tHjfhQiSiUCBGnEMUckUUgmHRSk00pkOWGc37EifamJ843iEzh9O8DSI0TLLSTDhcdDGwehKfyMdH5Sj8FoAinaRgcfNkBUDH/6NOknbBcZoV8vGnT/J4IAecOEnDJlPBmE6t0enkfQv/bzCE4BA9Op1OoZTLYf0jj8fyQBD3qJQdP1tlqteRJ44/Nffxn8U3PsVeTD71dP5vIHQa50YJr/YalQo55i0G6FLGjbrYR0pFoZhKSQKgAukEYgOmlSuPeRXC86hXD9BSkGWKKUMZjrSTtFgXcbQcLWYgCVMVvFqlCfd4srZt4pblmHL31FWd4tDE+YT7UdIq9iFaHFfodFEWtVxl0yhUUEZAqbNEWVUWLY5pMdUBwoYSOOEA6CP1ic1JeenxxpREjwkIaCLDFJNA/MJ3dmEhYSu0OPWZJemFOjNuSy/TZaU68iqK5HZHunlFrjvX5HDFu1Ny3NJUjbUiPr/CgGIalUKtUqqUOhSTyqWYFEMd8Xm4pijToE3TKSuMJoXBrF2R52AKk5LKXFnulWnOBJBnxMk0nlKShMVamFtcoTEb1FFFuhWZGQnlbukyoTzVuVzAbGXmohwik1KWpBuZImd6ijQjJ6tMRybnRSWos/hiizEGYXHBoZfFwCXEklAt+gnToNA0qNcyimj5pGViECvjFY3+P3ZHDLIcej6DptwxiAkQcBXBX9wJTKQA9OkMDZpM8MMCTvQQFOOywoXCgabEZDQ7Pk9s0ylUUeooNYFrtUqtjcDQhAzTwUlfTvmuwbuNmnFKQPlSQmx1KPWSaJVKKYlBnLheq1bJ5eNr+DW4GjgY3/2Lews2BfiMP35W8znumjr4wbbw2mdDGqsNdNs5olr/zcIjIWE7tP8M9U18BtuA235f/fyOMb3/0KGNPwSWEqFfnXe7sPaLzovn/1Sfc/TenM6mL9ULNDOrZt2ij5EnK38Kbz/73b+Y24P5N9a0Nd7A06xi697akIT6K1/cN7Y+bGnZWWHqeM1v8aXOhZ4ZTVVB712Q1In5A9/P07cUj611YdMrSoY9nF/L1S0v3R9a/1zHxboi88cvaXtqd8c2New/b0rddqd2M5c1+vlzbGDDLz+rmTX3pv+mJP9vT2TUWE4pTsTv6f7w9VeTDxrGnu559aQpaLU+vnf65lF519YXb2hnjtyRvb/kc3HtN5aalyNrl9wtq9mNL3pz3pU37hhdx/PRodGinPY46UBd2GJkq/Bhr+V7S+CL0/rnguCh5etPRqYM9G4+2bNpyUCv77nRuw+ql01fdVsb/nzKsuDQqq2HfQfvJEX1P9MVWxzU1tx1LTO4v+DBb5cOnnoQFbJYWbQ66iZ7JWXMfv1+WNKugbKy1qcKH7bPwduN1oqy6btG5MTaNqLDktzzdOOC6hDft0cMI+9vSrG/sNWK2U7Iu2JzT1/d/ZvTrdP8P/HNNv81ULhd+d2SQT0g3gxriRlU0ouyPq25EHst31fv6fV5N1u7k1cujUDeSnj5WdAx7bJx8U+tVbPptzJlJaqD82c6kL4CWdVnrfY/z1d6DGMfRdzK2Ps3S8Er2tzG6yMjlWn/CNFG3O1K/Trgg7NLt8cFB/Xec9+M+VKR1T5jRJM9v4/d8F7dO2ckpP+vNxriGucK2+NW9/yqYH1AeMbVr69TP356+ON5Y92HnwqUDGwHMwp/WLS65A9DybHDumG7PbV7oC3pd9KUtXuvnTnlNxS+a4t/cBAWHDD4Vf+FkmrxyIYtfc3hs1duTF6nnpHzzkjm+dCq7P7u7Os/vrKq8+hZV/fOhykB21qLd4W+/e3wgbPGHcPKwFn1+rlxppWXLl8C9+c03TrT2XLuXgf1F+bG6NDwcBwU+diYn0+G5tjfV/r7+PwbBje00g==
|
||||
eNqdVH1QFOcZh1gnjePo1MFoGRs3h8Yv9ti974OcjhzINxg5YA5T6d7ue9x6++Xu3nF3YvyIHUcl6BLJSDQlHoTTkyiK4igKDRVDY0ajxigxosbPTEWjJWJriX0PT2vbpH/0/WN3dt/n6/d7nt+zKuQFokTzXGwzzclAJEgZfkjKqpAIlniAJK9uYoHs4qnG+QWFtgaPSPfOdMmyICUnJRECrWZpSRYJRk3QSV48iXQRchLJswIDhsM0OnjK37ttqYoFkkSUA0mVvHCpSuQZoEpWeSQgqhJVJA/zcjL8UQK9p0mI7AIIz1AwN+Lm+AoOAT4iEhHhnQjp4QDt5EVWtey3iSqWpwADHaNFoBJLMAzKEDL0hZFlwAoQkewRYTpMbYR/eKFMUCXjiSroAAhWlewkGAksC7kAQUEe1jS6eElWdv07st0ESQJBRgFH8hTNlSut5QFaSEQo4IzkSkQCkkyFIQ4ODLOnhN0ACCjB0F4QjqBEIXROVloFP2SSQyP8+ZIwtcakxpuj8FHZLwClhRAEhiaJSJSkxRLPNT1J/d8Xu576MYArl11KA27At4tAEiDr4O0mSYawpVWNsGDweU8oSn+wIOcp0r6YuMY0WLxyxObyJCIaM5JH+BENptEjuD4ZMydrjUhGnq3ZGk1j+8ny9thEgpOcEGH6U25CpMvDuQEVtv4kH0d8kEYv70c9wpMWoJAfL00CVKZZoDSaDBpDpw+FXQMMzdIQ3/BT5t2Ak1CW5jyw5pAGGz69Cc9bioAlaA7W8J/WuNlsNuoNHT8fFoJ0KTtx7NnpVf3vyMMOYfOTA2Mfed48agTlI/pRMjJQDRqdrj3SSklCI42DCoB0MHwFyot0Oc0p22Ye8KFuHmZ4xktktDjSDykxYbp90VtB5H3+Z1dBXLsnehFVK0pTSi+iIwidTo+ZTWY9ACaNQe8gnbgJd5gMgIQPfDfpREmCdAH0yZwooTR7/ty8LGu4EMiolefdNKj5OjaurIx0ljlYSxajznQbCyTSWuQpmcsHSvBsYE2rKM8vXmzNUQe8EmkQPVmlLj1nR3GjzqTX4lqjEcXVmBpX42h2cUW2UzCkaT05hMObmxfITxVL0x22N/gsI29wLvaXOnJLOSw1Q8ZpE2YtVhc5fXZtDu5kjXb/Es4l6u2CwaQpKM/m2TRrfqAiINishiIsF+iX2CV1hdlFaQJluiw6Pz0FEQjZZUlKgUtDoKEeLNHxRuF4o0+GW/t0uFMQio801vKc1lOQTKjOAo7xpyCFgISrA74JFhTSMrDk8xzo3QhJ8XhpylJWXMSZyUxNunoB6xPdaTypyeaLHE6vCc+leCOrw3QggOvS4Tg8xwqmw1EsSowB05mGx+1fZf8fFTUVQgUBUQmTDO+h4EoSQZN1Hrpgrl3ZZ9YZDcDsIByEU6/TUBSaXrKg5ammn2m2MbLPQgQjo5KXVFpdWosqWafTqlIQlrCYDDoMG17kK+FaEaF99wsPJq//ZczwGSHb8vhJxtFDs1bn9zuY2wHDfsuBKbdenb5u0elF73ddzvzm11/VmGpNbn7048HU+LsZX9yYeU+5a7Hc627RfnCw4VHHkYMVd28futjX3ta5pL2t4sKFCykX+grd8QlDl6iz8rHq7eeKu65l2YN0Vd7q7BPylgv9F9u9Z36Uz21Gqg9/l3u5+aWhojG3hmozPmoeqR218LOmR3/tFuQB5pCHfOF61Yi6O91T7lHJb5ul4qqRDaofPwkO9spLHnTgxcEFic2zTyi+aTNiyjdmIilfKCswf1Pz9xOvz0jd0XP9zow94qT4P95aMX76/Pie2IyHtXtzgxMnXlmjSuietebFh5u+r58c++7np872LfzNqy2DcQknY2dX/uNv8bM3/OLbi36VhdwTHrN/Wg1/9ZNt4wZck+deP3/o4q5zJWz7ovrkG+mfXvlmdOEb221x46vSWl4Z++H5paffLLKv7W7X+Bw7NVO2zmvbOHbbhPq5GTZfMA2bImsDy1c49v/p2kP3D8GNq5C1MY/uVxW7b1b2ZMfW9mf1iPLQ1LfWdg5teDRy/JRZt7GerZ1XR91cV/rxW1de77oZBvsXX6aplOLd9z8qMJavH7X3vQln103SIg+NP/xqBPreUfPAJnFEYY2lPXUreYP4Mp2pq1YLzduK+gp/r9ROX4l+WNTXej/1sG3Wi+HwpYxTyjjqFdXd844v+cn9zXcGdXO6QnN+Fzz6bfHOMzv7a1aXkvaxIHXH4U3Ld3SgM3ort9SQCdVfnbwzp3ScPUttPFt3mFpOfLyGPnXt6ImVwY5uqzclewXmG1PY1Jqzfl+zqTLp6+OffoaVTBy9NxS3YUJswhi90VE3MB89nPv3g/WnW9UrD9BtVR2Vjs0NDbYzpYfi5lU/uAZmXkua7fvA+xrjP/mHTkfvm7WD3eWdht3v9I7vPLYmbfPU909QtX/WHTuOTf3u0ssn/1L2Tt2G9Yuzx722bL6/cXPeAfp4cMFA6N0t/W1M+GrX6/UHtpw7tzw2Jubx4xExzmmXlq8cGRPzT06hzwc=
|
||||
@@ -48,7 +48,7 @@ From the opposite direction, scientists use `LangChain` in research and referenc
| `2205.12654v1` [Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages](http://arxiv.org/abs/2205.12654v1) | Kevin Heffernan, Onur Çelebi, Holger Schwenk | 2022‑05‑25 | `API:` [langchain_community...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)
| `2204.00498v1` [Evaluating the Text-to-SQL Capabilities of Large Language Models](http://arxiv.org/abs/2204.00498v1) | Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau | 2022‑03‑15 | `Docs:` [docs/tutorials/sql_qa](https://python.langchain.com/docs/tutorials/sql_qa), `API:` [langchain_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase), [langchain_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL)
| `2202.00666v5` [Locally Typical Sampling](http://arxiv.org/abs/2202.00666v5) | Clara Meister, Tiago Pimentel, Gian Wiher, et al. | 2022‑02‑01 | `API:` [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)
| `2112.01488v3` [ColBERTv2: Effective and Efficient Retrieval via Lightweight Late Interaction](http://arxiv.org/abs/2112.01488v3) | Keshav Santhanam, Omar Khattab, Jon Saad-Falcon, et al. | 2021‑12‑02 | `Docs:` [docs/integrations/retrievers/ragatouille](https://python.langchain.com/docs/integrations/retrievers/ragatouille), [docs/integrations/providers/ragatouille](https://python.langchain.com/docs/integrations/providers/ragatouille), [docs/concepts](https://python.langchain.com/docs/concepts), [docs/integrations/providers/dspy](https://python.langchain.com/docs/integrations/providers/dspy)
| `2112.01488v3` [ColBERTv2: Effective and Efficient Retrieval via Lightweight Late Interaction](http://arxiv.org/abs/2112.01488v3) | Keshav Santhanam, Omar Khattab, Jon Saad-Falcon, et al. | 2021‑12‑02 | `Docs:` [docs/integrations/retrievers/ragatouille](https://python.langchain.com/docs/integrations/retrievers/ragatouille), [docs/integrations/providers/ragatouille](https://python.langchain.com/docs/integrations/providers/ragatouille), [docs/concepts](https://python.langchain.com/docs/concepts)
| `2103.00020v1` [Learning Transferable Visual Models From Natural Language Supervision](http://arxiv.org/abs/2103.00020v1) | Alec Radford, Jong Wook Kim, Chris Hallacy, et al. | 2021‑02‑26 | `API:` [langchain_experimental.open_clip](https://python.langchain.com/api_reference/experimental/open_clip.html)
| `2005.14165v4` [Language Models are Few-Shot Learners](http://arxiv.org/abs/2005.14165v4) | Tom B. Brown, Benjamin Mann, Nick Ryder, et al. | 2020‑05‑28 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts)
| `2005.11401v4` [Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks](http://arxiv.org/abs/2005.11401v4) | Patrick Lewis, Ethan Perez, Aleksandra Piktus, et al. | 2020‑05‑22 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts)
@@ -970,7 +970,7 @@ reducing degenerate repetitions.
- **arXiv id:** [2112.01488v3](http://arxiv.org/abs/2112.01488v3) **Published Date:** 2021-12-02
- **LangChain:**

- **Documentation:** [docs/integrations/retrievers/ragatouille](https://python.langchain.com/docs/integrations/retrievers/ragatouille), [docs/integrations/providers/ragatouille](https://python.langchain.com/docs/integrations/providers/ragatouille), [docs/concepts](https://python.langchain.com/docs/concepts), [docs/integrations/providers/dspy](https://python.langchain.com/docs/integrations/providers/dspy)
- **Documentation:** [docs/integrations/retrievers/ragatouille](https://python.langchain.com/docs/integrations/retrievers/ragatouille), [docs/integrations/providers/ragatouille](https://python.langchain.com/docs/integrations/providers/ragatouille), [docs/concepts](https://python.langchain.com/docs/concepts)

**Abstract:** Neural information retrieval (IR) has greatly advanced search and other
knowledge-intensive language tasks. While many neural IR methods encode queries

@@ -15,7 +15,7 @@ LangChain previously introduced the `AgentExecutor` as a runtime for agents.
While it served as an excellent starting point, its limitations became apparent when dealing with more sophisticated and customized agents.
As a result, we're gradually phasing out `AgentExecutor` in favor of more flexible solutions in LangGraph.

### Transitioning from AgentExecutor to langgraph
### Transitioning from AgentExecutor to LangGraph

If you're currently using `AgentExecutor`, don't worry! We've prepared resources to help you:


@@ -9,7 +9,7 @@ LLM based applications often involve a lot of I/O-bound operations, such as maki

:::note
You are expected to be familiar with asynchronous programming in Python before reading this guide. If you are not, please find appropriate resources online to learn how to program asynchronously in Python.
This guide specifically focuses on what you need to know to work with LangChain in an asynchronous context, assuming that you are already familiar with asynch
This guide specifically focuses on what you need to know to work with LangChain in an asynchronous context, assuming that you are already familiar with asynchronous programming.
:::

## Langchain asynchronous APIs

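The hunk above is from the guide on LangChain's async support. As a rough illustration of the pattern that guide describes (not part of the diff itself), here is a minimal sketch using `RunnableLambda` from `langchain_core`; any runnable exposes the same `a`-prefixed methods.

```python
import asyncio

from langchain_core.runnables import RunnableLambda

# Every LangChain runnable pairs its sync methods (invoke, batch, stream)
# with async counterparts (ainvoke, abatch, astream).
chain = RunnableLambda(lambda text: text.upper())

async def main() -> None:
    # ainvoke runs the same logic without blocking the event loop.
    result = await chain.ainvoke("hello")
    print(result)  # HELLO

asyncio.run(main())
```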
@@ -6,7 +6,7 @@

LangChain provides a callback system that allows you to hook into the various stages of your LLM application. This is useful for logging, monitoring, streaming, and other tasks.

You can subscribe to these events by using the `callbacks` argument available throughout the API. This argument is list of handler objects, which are expected to implement one or more of the methods described below in more detail.
You can subscribe to these events by using the `callbacks` argument available throughout the API. This argument is a list of handler objects, which are expected to implement one or more of the methods described below in more detail.

## Callback events


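To make the `callbacks` argument concrete, a minimal handler sketch follows; `LoggingHandler` is a hypothetical name, but `BaseCallbackHandler` and the `on_llm_start`/`on_llm_end` hooks are part of `langchain_core`.

```python
from langchain_core.callbacks import BaseCallbackHandler

class LoggingHandler(BaseCallbackHandler):
    """Minimal handler implementing two of the callback events."""

    def on_llm_start(self, serialized, prompts, **kwargs):
        print(f"LLM started with {len(prompts)} prompt(s)")

    def on_llm_end(self, response, **kwargs):
        print("LLM finished")

# Handlers are passed as a list via the `callbacks` argument, e.g.:
# model.invoke("hello", config={"callbacks": [LoggingHandler()]})
```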
@@ -32,7 +32,7 @@ The only requirement for a retriever is the ability to accepts a query and retur
In particular, [LangChain's retriever class](https://python.langchain.com/api_reference/core/retrievers/langchain_core.retrievers.BaseRetriever.html#) only requires that the `_get_relevant_documents` method is implemented, which takes a `query: str` and returns a list of [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html) objects that are most relevant to the query.
The underlying logic used to get relevant documents is specified by the retriever and can be whatever is most useful for the application.

A LangChain retriever is a [runnable](/docs/how_to/lcel_cheatsheet/), which is a standard interface is for LangChain components.
A LangChain retriever is a [runnable](/docs/how_to/lcel_cheatsheet/), which is a standard interface for LangChain components.
This means that it has a few common methods, including `invoke`, that are used to interact with it. A retriever can be invoked with a query:

```python
@@ -57,7 +57,7 @@ Despite the flexibility of the retriever interface, a few common types of retrie
### Search apis

It's important to note that retrievers don't need to actually *store* documents.
For example, we can be built retrievers on top of search APIs that simply return search results!
For example, we can build retrievers on top of search APIs that simply return search results!
See our retriever integrations with [Amazon Kendra](/docs/integrations/retrievers/amazon_kendra_retriever/) or [Wikipedia Search](/docs/integrations/retrievers/wikipedia/).

### Relational or graph database

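The two hunks above describe the retriever interface. A minimal sketch of a custom retriever, assuming only `langchain_core` (the `KeywordRetriever` name is hypothetical):

```python
from typing import List

from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever

class KeywordRetriever(BaseRetriever):
    """Toy retriever returning documents that contain the query string."""

    documents: List[Document]

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        return [doc for doc in self.documents if query in doc.page_content]

docs = [Document(page_content="LangChain retrievers"), Document(page_content="unrelated")]
retriever = KeywordRetriever(documents=docs)
# Retrievers are runnables, so they can be invoked with a query:
print(retriever.invoke("LangChain"))
```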
@@ -11,8 +11,8 @@ This need motivates the concept of structured output, where models can be instru

## Key concepts

**(1) Schema definition:** The output structure is represented as a schema, which can be defined in several ways.
**(2) Returning structured output:** The model is given this schema, and is instructed to return output that conforms to it.
1. **Schema definition:** The output structure is represented as a schema, which can be defined in several ways.<br/>
2. **Returning structured output:** The model is given this schema, and is instructed to return output that conforms to it.

## Recommended usage

@@ -109,11 +109,11 @@ ai_msg

There are a few challenges when producing structured output with the above methods:

(1) When tool calling is used, tool call arguments needs to be parsed from a dictionary back to the original schema.
1. When tool calling is used, tool call arguments needs to be parsed from a dictionary back to the original schema.<br/>

(2) In addition, the model needs to be instructed to *always* use the tool when we want to enforce structured output, which is a provider specific setting.
2. In addition, the model needs to be instructed to *always* use the tool when we want to enforce structured output, which is a provider specific setting.<br/>

(3) When JSON mode is used, the output needs to be parsed into a JSON object.
3. When JSON mode is used, the output needs to be parsed into a JSON object.

With these challenges in mind, LangChain provides a helper function (`with_structured_output()`) to streamline the process.


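As a sketch of the helper the hunk above mentions: `with_structured_output()` takes a schema (here a Pydantic model) and returns a model-like runnable whose output is parsed into that schema. The model binding is left commented out because it is provider specific.

```python
from pydantic import BaseModel, Field

class Person(BaseModel):
    """Schema the model output must conform to."""

    name: str = Field(description="The person's name")
    age: int = Field(description="The person's age")

# Hypothetical binding; any chat model that supports structured output works:
# structured_model = model.with_structured_output(Person)
# result = structured_model.invoke("Alice is 30 years old.")
# `result` would be a `Person` instance; argument parsing and tool
# enforcement are handled by the helper.
```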
@@ -3,8 +3,8 @@

:::info[Prerequisites]

* [Documents](/docs/concepts/retrievers/#interface)
* Tokenization(/docs/concepts/tokens)
* [Documents](./retrievers.mdx)
* [Tokenization](./tokens.mdx)
:::

## Overview

@@ -21,10 +21,10 @@ You will sometimes hear the term `function calling`. We use this term interchang

## Key concepts

**(1) Tool Creation:** Use the [@tool](https://python.langchain.com/api_reference/core/tools/langchain_core.tools.convert.tool.html) decorator to create a [tool](/docs/concepts/tools). A tool is an association between a function and its schema.
**(2) Tool Binding:** The tool needs to be connected to a model that supports tool calling. This gives the model awareness of the tool and the associated input schema required by the tool.
**(3) Tool Calling:** When appropriate, the model can decide to call a tool and ensure its response conforms to the tool's input schema.
**(4) Tool Execution:** The tool can be executed using the arguments provided by the model.
1. **Tool Creation:** Use the [@tool](https://python.langchain.com/api_reference/core/tools/langchain_core.tools.convert.tool.html) decorator to create a [tool](/docs/concepts/tools). A tool is an association between a function and its schema.<br/>
2. **Tool Binding:** The tool needs to be connected to a model that supports tool calling. This gives the model awareness of the tool and the associated input schema required by the tool.<br/>
3. **Tool Calling:** When appropriate, the model can decide to call a tool and ensure its response conforms to the tool's input schema.<br/>
4. **Tool Execution:** The tool can be executed using the arguments provided by the model.




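A compact sketch of the four steps listed above, using the `@tool` decorator from `langchain_core` (the model binding is commented out since it is provider specific):

```python
from langchain_core.tools import tool

@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b

# 1. Tool creation: the decorator derives a schema from the signature.
print(multiply.name, multiply.args)

# 2.-4. Binding, calling and execution (hypothetical model binding):
# model_with_tools = model.bind_tools([multiply])
# ai_msg = model_with_tools.invoke("What is 3 times 4?")
# for call in ai_msg.tool_calls:
#     print(multiply.invoke(call["args"]))  # -> 12
```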
@@ -192,7 +192,7 @@ All Toolkits expose a `get_tools` method which returns a list of tools. You can

```python
# Initialize a toolkit
toolkit = ExampleTookit(...)
toolkit = ExampleToolkit(...)

# Get list of tools
tools = toolkit.get_tools()

@@ -82,7 +82,7 @@ Here are some high-level tips on writing a good how-to guide:
LangChain's conceptual guide falls under the **Explanation** quadrant of Diataxis. These guides should cover LangChain terms and concepts
in a more abstract way than how-to guides or tutorials, targeting curious users interested in
gaining a deeper understanding and insights of the framework. Try to avoid excessively large code examples as the primary goal is to
provide perspective to the user rather than to finish a practical project. These guides should cover **why** things work they way they do.
provide perspective to the user rather than to finish a practical project. These guides should cover **why** things work the way they do.

This guide on documentation style is meant to fall under this category.


@@ -157,7 +157,7 @@
"\n",
"## Next steps\n",
"\n",
"Now you've learned how to pass data through your chains to help to help format the data flowing through your chains.\n",
"Now you've learned how to pass data through your chains to help format the data flowing through your chains.\n",
"\n",
"To learn more, see the other how-to guides on runnables in this section."
]

@@ -98,7 +98,7 @@
"    ) -> List[Document]:\n",
"        \"\"\"Sync implementations for retriever.\"\"\"\n",
"        matching_documents = []\n",
"        for document in documents:\n",
"        for document in self.documents:\n",
"            if len(matching_documents) > self.k:\n",
"                return matching_documents\n",
"\n",

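For context, the fix above makes the loop iterate over the documents stored on the retriever instance rather than an undefined local name. The corrected method, rendered as plain code (a sketch; `self.k` is the result cutoff defined elsewhere in that notebook):

```python
def _get_relevant_documents(
    self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
    """Sync implementations for retriever."""
    matching_documents = []
    # Iterate over the documents stored on this retriever instance.
    for document in self.documents:
        if len(matching_documents) > self.k:
            return matching_documents
        if query.lower() in document.page_content.lower():
            matching_documents.append(document)
    return matching_documents
```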
@@ -141,7 +141,7 @@
"{'description': 'Multiply a by the maximum of b.',\n",
" 'properties': {'a': {'description': 'scale factor',\n",
"   'title': 'A',\n",
"   'type': 'string'},\n",
"   'type': 'integer'},\n",
"  'b': {'description': 'list of ints over which to take maximum',\n",
"   'items': {'type': 'integer'},\n",
"   'title': 'B',\n",
@@ -530,7 +530,7 @@
"\n",
"    def _run(\n",
"        self, a: int, b: int, run_manager: Optional[CallbackManagerForToolRun] = None\n",
"    ) -> str:\n",
"    ) -> int:\n",
"        \"\"\"Use the tool.\"\"\"\n",
"        return a * b\n",
"\n",
@@ -539,7 +539,7 @@
"        a: int,\n",
"        b: int,\n",
"        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,\n",
"    ) -> str:\n",
"    ) -> int:\n",
"        \"\"\"Use the tool asynchronously.\"\"\"\n",
"        # If the calculation is cheap, you can just delegate to the sync implementation\n",
"        # as shown below.\n",

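The two hunks above correct the return annotations of `_run` and `_arun` from `str` to `int`, matching the value actually returned. A self-contained sketch of such a tool subclass, assuming only public `langchain_core` classes:

```python
from typing import Optional, Type

from langchain_core.callbacks import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field

class CalculatorInput(BaseModel):
    a: int = Field(description="first number")
    b: int = Field(description="second number")

class MultiplyTool(BaseTool):
    name: str = "multiply"
    description: str = "Multiply two numbers."
    args_schema: Type[BaseModel] = CalculatorInput

    def _run(
        self, a: int, b: int, run_manager: Optional[CallbackManagerForToolRun] = None
    ) -> int:  # annotation matches the returned value
        """Use the tool."""
        return a * b

    async def _arun(
        self,
        a: int,
        b: int,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> int:
        """Use the tool asynchronously."""
        # The calculation is cheap, so delegate to the sync implementation.
        return self._run(
            a, b, run_manager=run_manager.get_sync() if run_manager else None
        )
```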
@@ -67,9 +67,34 @@
"When implementing a document loader do **NOT** provide parameters via the `lazy_load` or `alazy_load` methods.\n",
"\n",
"All configuration is expected to be passed through the initializer (__init__). This was a design choice made by LangChain to make sure that once a document loader has been instantiated it has all the information needed to load documents.\n",
":::\n",
"\n",
":::"
]
},
{
"cell_type": "markdown",
"id": "520edbbabde7df6e",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"Install **langchain-core** and **langchain_community**."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "936bd5fc",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain_core langchain_community"
]
},
{
"cell_type": "markdown",
"id": "a93f17a87d323bdd",
"metadata": {},
"source": [
"### Implementation\n",
"\n",
"Let's create an example of a standard document loader that loads a file and creates a document from each line in the file."
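The hunk above stresses that all loader configuration goes through `__init__`, with `lazy_load` taking no arguments. A minimal standalone sketch of that shape (mirroring, not quoting, the notebook's example):

```python
from typing import Iterator

from langchain_core.document_loaders import BaseLoader
from langchain_core.documents import Document

class CustomDocumentLoader(BaseLoader):
    """Load a file line by line; all configuration goes through __init__."""

    def __init__(self, file_path: str) -> None:
        self.file_path = file_path

    def lazy_load(self) -> Iterator[Document]:  # <-- takes no arguments
        with open(self.file_path, encoding="utf-8") as f:
            for line_number, line in enumerate(f):
                yield Document(
                    page_content=line,
                    metadata={"line_number": line_number, "source": self.file_path},
                )
```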
@@ -77,9 +102,13 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 2,
"id": "20f128c1-1a2c-43b9-9e7b-cf9b3a86d1db",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-21T08:49:56.764714Z",
"start_time": "2025-04-21T08:49:56.623508Z"
},
"tags": []
},
"outputs": [],
@@ -122,7 +151,7 @@
"        self,\n",
"    ) -> AsyncIterator[Document]:  # <-- Does not take any arguments\n",
"        \"\"\"An async lazy loader that reads a file line by line.\"\"\"\n",
"        # Requires aiofiles (install with pip)\n",
"        # Requires aiofiles\n",
"        # https://github.com/Tinche/aiofiles\n",
"        import aiofiles\n",
"\n",
@@ -151,9 +180,13 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 3,
"id": "b1751198-c6dd-4149-95bd-6370ce8fa06f",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-21T08:49:56.776521Z",
"start_time": "2025-04-21T08:49:56.773511Z"
},
"tags": []
},
"outputs": [],
@@ -167,9 +200,23 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": null,
"id": "c5210428",
"metadata": {},
"outputs": [],
"source": [
"%pip install -q aiofiles"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "71ef1482-f9de-4852-b5a4-0938f350612e",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-21T08:49:57.972675Z",
"start_time": "2025-04-21T08:49:57.969411Z"
},
"tags": []
},
"outputs": [
@@ -179,10 +226,12 @@
"text": [
"\n",
"<class 'langchain_core.documents.base.Document'>\n",
"page_content='meow meow🐱 \\n' metadata={'line_number': 0, 'source': './meow.txt'}\n",
"page_content='meow meow🐱 \n",
"' metadata={'line_number': 0, 'source': './meow.txt'}\n",
"\n",
"<class 'langchain_core.documents.base.Document'>\n",
"page_content=' meow meow🐱 \\n' metadata={'line_number': 1, 'source': './meow.txt'}\n",
"page_content=' meow meow🐱 \n",
"' metadata={'line_number': 1, 'source': './meow.txt'}\n",
"\n",
"<class 'langchain_core.documents.base.Document'>\n",
"page_content=' meow😻😻' metadata={'line_number': 2, 'source': './meow.txt'}\n"
@@ -199,9 +248,13 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 6,
"id": "1588e78c-e81a-4d40-b36c-634242c84a6a",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-21T08:49:58.028989Z",
"start_time": "2025-04-21T08:49:58.021972Z"
},
"tags": []
},
"outputs": [
@@ -211,10 +264,12 @@
"text": [
"\n",
"<class 'langchain_core.documents.base.Document'>\n",
"page_content='meow meow🐱 \\n' metadata={'line_number': 0, 'source': './meow.txt'}\n",
"page_content='meow meow🐱 \n",
"' metadata={'line_number': 0, 'source': './meow.txt'}\n",
"\n",
"<class 'langchain_core.documents.base.Document'>\n",
"page_content=' meow meow🐱 \\n' metadata={'line_number': 1, 'source': './meow.txt'}\n",
"page_content=' meow meow🐱 \n",
"' metadata={'line_number': 1, 'source': './meow.txt'}\n",
"\n",
"<class 'langchain_core.documents.base.Document'>\n",
"page_content=' meow😻😻' metadata={'line_number': 2, 'source': './meow.txt'}\n"
@@ -245,21 +300,25 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 7,
"id": "df5ad46a-9e00-4073-8505-489fc4f3799e",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-21T08:49:58.078111Z",
"start_time": "2025-04-21T08:49:58.071421Z"
},
"tags": []
},
"outputs": [
{
"data": {
"text/plain": [
"[Document(page_content='meow meow🐱 \\n', metadata={'line_number': 0, 'source': './meow.txt'}),\n",
" Document(page_content=' meow meow🐱 \\n', metadata={'line_number': 1, 'source': './meow.txt'}),\n",
" Document(page_content=' meow😻😻', metadata={'line_number': 2, 'source': './meow.txt'})]"
"[Document(metadata={'line_number': 0, 'source': './meow.txt'}, page_content='meow meow🐱 \\n'),\n",
" Document(metadata={'line_number': 1, 'source': './meow.txt'}, page_content=' meow meow🐱 \\n'),\n",
" Document(metadata={'line_number': 2, 'source': './meow.txt'}, page_content=' meow😻😻')]"
]
},
"execution_count": 6,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
@@ -286,9 +345,13 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 8,
"id": "209f6a91-2f15-4cb2-9237-f79fc9493b82",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-21T08:49:58.124363Z",
"start_time": "2025-04-21T08:49:58.120782Z"
},
"tags": []
},
"outputs": [],
@@ -313,9 +376,13 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 9,
"id": "b1275c59-06d4-458f-abd2-fcbad0bde442",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-21T08:49:58.172506Z",
"start_time": "2025-04-21T08:49:58.167416Z"
},
"tags": []
},
"outputs": [],
@@ -326,21 +393,25 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 10,
"id": "56a3d707-2086-413b-ae82-50e92ddb27f6",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-21T08:49:58.218426Z",
"start_time": "2025-04-21T08:49:58.214684Z"
},
"tags": []
},
"outputs": [
{
"data": {
"text/plain": [
"[Document(page_content='meow meow🐱 \\n', metadata={'line_number': 1, 'source': './meow.txt'}),\n",
" Document(page_content=' meow meow🐱 \\n', metadata={'line_number': 2, 'source': './meow.txt'}),\n",
" Document(page_content=' meow😻😻', metadata={'line_number': 3, 'source': './meow.txt'})]"
"[Document(metadata={'line_number': 1, 'source': './meow.txt'}, page_content='meow meow🐱 \\n'),\n",
" Document(metadata={'line_number': 2, 'source': './meow.txt'}, page_content=' meow meow🐱 \\n'),\n",
" Document(metadata={'line_number': 3, 'source': './meow.txt'}, page_content=' meow😻😻')]"
]
},
"execution_count": 8,
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
@@ -359,20 +430,24 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 11,
"id": "20d03092-ba35-47d7-b612-9d1631c261cd",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-21T08:49:58.267755Z",
"start_time": "2025-04-21T08:49:58.264369Z"
},
"tags": []
},
"outputs": [
{
"data": {
"text/plain": [
"[Document(page_content='some data from memory\\n', metadata={'line_number': 1, 'source': None}),\n",
" Document(page_content='meow', metadata={'line_number': 2, 'source': None})]"
"[Document(metadata={'line_number': 1, 'source': None}, page_content='some data from memory\\n'),\n",
" Document(metadata={'line_number': 2, 'source': None}, page_content='meow')]"
]
},
"execution_count": 9,
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
@@ -394,9 +469,13 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 12,
"id": "a9e92e0e-c8da-401c-b8c6-f0676004cf58",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-21T08:49:58.330432Z",
"start_time": "2025-04-21T08:49:58.327223Z"
},
"tags": []
},
"outputs": [],
@@ -406,9 +485,13 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 13,
"id": "6b559d30-8b0c-4e45-86b1-e4602d9aaa7e",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-21T08:49:58.383905Z",
"start_time": "2025-04-21T08:49:58.380658Z"
},
"tags": []
},
"outputs": [
@@ -418,7 +501,7 @@
"'utf-8'"
]
},
"execution_count": 11,
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
@@ -429,9 +512,13 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 14,
"id": "2f7b145a-9c6f-47f9-9487-1f4b25aff46f",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-21T08:49:58.443829Z",
"start_time": "2025-04-21T08:49:58.440222Z"
},
"tags": []
},
"outputs": [
@@ -441,7 +528,7 @@
"b'meow meow\\xf0\\x9f\\x90\\xb1 \\n meow meow\\xf0\\x9f\\x90\\xb1 \\n meow\\xf0\\x9f\\x98\\xbb\\xf0\\x9f\\x98\\xbb'"
]
},
"execution_count": 12,
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
@@ -452,9 +539,13 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 15,
"id": "9b9482fa-c49c-42cd-a2ef-80bc93214631",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-21T08:49:58.498609Z",
"start_time": "2025-04-21T08:49:58.494903Z"
},
"tags": []
},
"outputs": [
@@ -464,7 +555,7 @@
"'meow meow🐱 \\n meow meow🐱 \\n meow😻😻'"
]
},
"execution_count": 13,
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
@@ -475,19 +566,23 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 16,
"id": "04cc7a81-290e-4ef8-b7e1-d885fcc59ece",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-21T08:49:58.551353Z",
"start_time": "2025-04-21T08:49:58.547518Z"
},
"tags": []
},
"outputs": [
{
"data": {
"text/plain": [
"<contextlib._GeneratorContextManager at 0x743f34324450>"
"<contextlib._GeneratorContextManager at 0x74b8d42e9940>"
]
},
"execution_count": 14,
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
@@ -498,9 +593,13 @@
},
{
"cell_type": "code",
"execution_count": 15,
"execution_count": 17,
"id": "ec8de0ab-51d7-4e41-82c9-3ce0a6fdc2cd",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-21T08:49:58.599576Z",
"start_time": "2025-04-21T08:49:58.596567Z"
},
"tags": []
},
"outputs": [
@@ -510,7 +609,7 @@
"{'foo': 'bar'}"
]
},
"execution_count": 15,
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
@@ -521,9 +620,13 @@
},
{
"cell_type": "code",
"execution_count": 16,
"execution_count": 18,
"id": "19eae991-ae48-43c2-8952-7347cdb76a34",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-21T08:49:58.649634Z",
"start_time": "2025-04-21T08:49:58.646313Z"
},
"tags": []
},
"outputs": [
@@ -533,7 +636,7 @@
"'./meow.txt'"
]
},
"execution_count": 16,
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
@@ -551,65 +654,50 @@
"\n",
"While a parser encapsulates the logic needed to parse binary data into documents, *blob loaders* encapsulate the logic that's necessary to load blobs from a given storage location.\n",
"\n",
"At the moment, `LangChain` only supports `FileSystemBlobLoader`.\n",
"At the moment, `LangChain` supports `FileSystemBlobLoader` and `CloudBlobLoader`.\n",
"\n",
"You can use the `FileSystemBlobLoader` to load blobs and then use the parser to parse them."
]
},
{
"cell_type": "code",
"execution_count": 17,
"execution_count": 19,
"id": "c093becb-2e84-4329-89e3-956a3bd765e5",
"metadata": {
"ExecuteTime": {
"end_time": "2025-04-21T08:49:58.718259Z",
"start_time": "2025-04-21T08:49:58.705367Z"
},
"tags": []
},
"outputs": [],
"source": [
"from langchain_community.document_loaders.blob_loaders import FileSystemBlobLoader\n",
"\n",
"blob_loader = FileSystemBlobLoader(path=\".\", glob=\"*.mdx\", show_progress=True)"
"filesystem_blob_loader = FileSystemBlobLoader(\n",
"    path=\".\", glob=\"*.mdx\", show_progress=True\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "77739dab-2a1e-4b64-8daa-fee8aa029972",
"metadata": {
"tags": []
},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "45e85d3f63224bb59db02a40ae2e3268",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"  0%|          | 0/8 [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"page_content='# Microsoft Office\\n' metadata={'line_number': 1, 'source': 'office_file.mdx'}\n",
"page_content='# Markdown\\n' metadata={'line_number': 1, 'source': 'markdown.mdx'}\n",
"page_content='# JSON\\n' metadata={'line_number': 1, 'source': 'json.mdx'}\n",
"page_content='---\\n' metadata={'line_number': 1, 'source': 'pdf.mdx'}\n",
"page_content='---\\n' metadata={'line_number': 1, 'source': 'index.mdx'}\n",
"page_content='# File Directory\\n' metadata={'line_number': 1, 'source': 'file_directory.mdx'}\n",
"page_content='# CSV\\n' metadata={'line_number': 1, 'source': 'csv.mdx'}\n",
"page_content='# HTML\\n' metadata={'line_number': 1, 'source': 'html.mdx'}\n"
]
}
],
"execution_count": null,
"id": "21b91bad",
"metadata": {},
"outputs": [],
"source": [
"%pip install -q tqdm"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "40be670b",
"metadata": {},
"outputs": [],
"source": [
"parser = MyParser()\n",
"for blob in blob_loader.yield_blobs():\n",
"for blob in filesystem_blob_loader.yield_blobs():\n",
"    for doc in parser.lazy_parse(blob):\n",
"        print(doc)\n",
"        break"
@@ -620,56 +708,104 @@
|
||||
"id": "f016390c-d38b-4261-946d-34eefe546df7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Generic Loader\n",
|
||||
"\n",
|
||||
"LangChain has a `GenericLoader` abstraction which composes a `BlobLoader` with a `BaseBlobParser`.\n",
|
||||
"\n",
|
||||
"`GenericLoader` is meant to provide standardized classmethods that make it easy to use existing `BlobLoader` implementations. At the moment, only the `FileSystemBlobLoader` is supported."
|
||||
"Or, you can use `CloudBlobLoader` to load blobs from a cloud storage location (Supports s3://, az://, gs://, file:// schemes)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "1de74daf-70ee-4616-9089-d28e26b16851",
|
||||
"execution_count": null,
|
||||
"id": "8210714e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -q 'cloudpathlib[s3]'"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d3f84501-b0aa-4a60-aad2-5109cbd37d4f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```python\n",
|
||||
"from cloudpathlib import S3Client, S3Path\n",
|
||||
"from langchain_community.document_loaders.blob_loaders import CloudBlobLoader\n",
|
||||
"\n",
|
||||
"client = S3Client(no_sign_request=True)\n",
|
||||
"client.set_as_default_client()\n",
|
||||
"\n",
|
||||
"path = S3Path(\n",
|
||||
" \"s3://bucket-01\", client=client\n",
|
||||
") # Supports s3://, az://, gs://, file:// schemes.\n",
|
||||
"\n",
|
||||
"cloud_loader = CloudBlobLoader(path, glob=\"**/*.pdf\", show_progress=True)\n",
|
||||
"\n",
|
||||
"for blob in cloud_loader.yield_blobs():\n",
|
||||
" print(blob)\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "40c361ba4cd30164",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Generic Loader\n",
|
||||
"\n",
|
||||
"LangChain has a `GenericLoader` abstraction which composes a `BlobLoader` with a `BaseBlobParser`.\n",
|
||||
"\n",
|
||||
"`GenericLoader` is meant to provide standardized classmethods that make it easy to use existing `BlobLoader` implementations. At the moment, the `FileSystemBlobLoader` and `CloudBlobLoader` are supported. See example below:"
|
||||
]
|
||||
},
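For instance, `GenericLoader.from_filesystem` is one such classmethod; a minimal sketch, reusing the `MyParser` defined earlier:

```python
from langchain_community.document_loaders.generic import GenericLoader

# The classmethod builds the FileSystemBlobLoader for you from path/glob args.
loader = GenericLoader.from_filesystem(
    path=".", glob="*.mdx", show_progress=True, parser=MyParser()
)

for doc in loader.lazy_load():
    print(doc)
    break
```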
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"id": "5dfb2be02fe662c5",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
"ExecuteTime": {
|
||||
"end_time": "2025-04-21T08:50:16.244917Z",
|
||||
"start_time": "2025-04-21T08:50:15.527562Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "5f1f6810a71a4909ac9fe1e8f8cb9e0a",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
" 0%| | 0/8 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|██████████| 7/7 [00:00<00:00, 1224.82it/s]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"page_content='# Microsoft Office\\n' metadata={'line_number': 1, 'source': 'office_file.mdx'}\n",
|
||||
"page_content='\\n' metadata={'line_number': 2, 'source': 'office_file.mdx'}\n",
|
||||
"page_content='>[The Microsoft Office](https://www.office.com/) suite of productivity software includes Microsoft Word, Microsoft Excel, Microsoft PowerPoint, Microsoft Outlook, and Microsoft OneNote. It is available for Microsoft Windows and macOS operating systems. It is also available on Android and iOS.\\n' metadata={'line_number': 3, 'source': 'office_file.mdx'}\n",
|
||||
"page_content='\\n' metadata={'line_number': 4, 'source': 'office_file.mdx'}\n",
|
||||
"page_content='This covers how to load commonly used file formats including `DOCX`, `XLSX` and `PPTX` documents into a document format that we can use downstream.\\n' metadata={'line_number': 5, 'source': 'office_file.mdx'}\n",
|
||||
"page_content='# Text embedding models\n",
|
||||
"' metadata={'line_number': 1, 'source': 'embed_text.mdx'}\n",
|
||||
"page_content='\n",
|
||||
"' metadata={'line_number': 2, 'source': 'embed_text.mdx'}\n",
|
||||
"page_content=':::info\n",
|
||||
"' metadata={'line_number': 3, 'source': 'embed_text.mdx'}\n",
|
||||
"page_content='Head to [Integrations](/docs/integrations/text_embedding/) for documentation on built-in integrations with text embedding model providers.\n",
|
||||
"' metadata={'line_number': 4, 'source': 'embed_text.mdx'}\n",
|
||||
"page_content=':::\n",
|
||||
"' metadata={'line_number': 5, 'source': 'embed_text.mdx'}\n",
|
||||
"... output truncated for demo purposes\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders.generic import GenericLoader\n",
|
||||
"\n",
|
||||
"loader = GenericLoader.from_filesystem(\n",
|
||||
" path=\".\", glob=\"*.mdx\", show_progress=True, parser=MyParser()\n",
|
||||
"generic_loader_filesystem = GenericLoader(\n",
|
||||
" blob_loader=filesystem_blob_loader, blob_parser=parser\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"for idx, doc in enumerate(loader.lazy_load()):\n",
|
||||
"for idx, doc in enumerate(generic_loader_filesystem.lazy_load()):\n",
|
||||
" if idx < 5:\n",
|
||||
" print(doc)\n",
|
||||
"\n",
|
||||
@@ -690,9 +826,13 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"execution_count": 28,
|
||||
"id": "23633102-dc44-4fed-a4e1-8159489101c8",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2025-04-21T08:50:34.841862Z",
|
||||
"start_time": "2025-04-21T08:50:34.838375Z"
|
||||
},
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
@@ -709,37 +849,46 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"execution_count": 29,
|
||||
"id": "dc95be85-4a29-4c6f-a260-08afa3c95538",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2025-04-21T08:50:34.901734Z",
|
||||
"start_time": "2025-04-21T08:50:34.888098Z"
|
||||
},
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "4320598ea3b44a52b1873e1c801db312",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
" 0%| | 0/8 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|██████████| 7/7 [00:00<00:00, 814.86it/s]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"page_content='# Microsoft Office\\n' metadata={'line_number': 1, 'source': 'office_file.mdx'}\n",
|
||||
"page_content='\\n' metadata={'line_number': 2, 'source': 'office_file.mdx'}\n",
|
||||
"page_content='>[The Microsoft Office](https://www.office.com/) suite of productivity software includes Microsoft Word, Microsoft Excel, Microsoft PowerPoint, Microsoft Outlook, and Microsoft OneNote. It is available for Microsoft Windows and macOS operating systems. It is also available on Android and iOS.\\n' metadata={'line_number': 3, 'source': 'office_file.mdx'}\n",
|
||||
"page_content='\\n' metadata={'line_number': 4, 'source': 'office_file.mdx'}\n",
|
||||
"page_content='This covers how to load commonly used file formats including `DOCX`, `XLSX` and `PPTX` documents into a document format that we can use downstream.\\n' metadata={'line_number': 5, 'source': 'office_file.mdx'}\n",
|
||||
"page_content='# Text embedding models\n",
|
||||
"' metadata={'line_number': 1, 'source': 'embed_text.mdx'}\n",
|
||||
"page_content='\n",
|
||||
"' metadata={'line_number': 2, 'source': 'embed_text.mdx'}\n",
|
||||
"page_content=':::info\n",
|
||||
"' metadata={'line_number': 3, 'source': 'embed_text.mdx'}\n",
|
||||
"page_content='Head to [Integrations](/docs/integrations/text_embedding/) for documentation on built-in integrations with text embedding model providers.\n",
|
||||
"' metadata={'line_number': 4, 'source': 'embed_text.mdx'}\n",
|
||||
"page_content=':::\n",
|
||||
"' metadata={'line_number': 5, 'source': 'embed_text.mdx'}\n",
|
||||
"... output truncated for demo purposes\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
@@ -769,7 +918,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
"\n",
|
||||
"LangChain integrates with a host of PDF parsers. Some are simple and relatively low-level; others will support OCR and image-processing, or perform advanced document layout analysis. The right choice will depend on your needs. Below we enumerate the possibilities.\n",
|
||||
"\n",
|
||||
"We will demonstrate these approaches on a [sample file](https://github.com/langchain-ai/langchain/blob/master/libs/community/tests/integration_tests/examples/layout-parser-paper.pdf):"
|
||||
"We will demonstrate these approaches on a [sample file](https://github.com/langchain-ai/langchain-community/blob/main/libs/community/tests/examples/layout-parser-paper.pdf):"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -40,7 +40,7 @@
|
||||
"from langchain_core.globals import set_llm_cache\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"# To make the caching really obvious, lets use a slower and older model.\n",
|
||||
"# To make the caching really obvious, let's use a slower and older model.\n",
|
||||
"# Caching supports newer chat models as well.\n",
|
||||
"llm = OpenAI(model=\"gpt-3.5-turbo-instruct\", n=2, best_of=2)"
|
||||
]
|
||||
|
||||
@@ -314,7 +314,7 @@
|
||||
"source": [
|
||||
"%env CMAKE_ARGS=\"-DLLAMA_METAL=on\"\n",
|
||||
"%env FORCE_CMAKE=1\n",
|
||||
"%pip install --upgrade --quiet llama-cpp-python --no-cache-dirclear"
|
||||
"%pip install --upgrade --quiet llama-cpp-python --no-cache-dir"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -212,6 +212,10 @@
|
||||
"[Anthropic](/docs/integrations/chat/anthropic/), and\n",
|
||||
"[Google Gemini](/docs/integrations/chat/google_generative_ai/)) will accept PDF documents.\n",
|
||||
"\n",
|
||||
":::note\n",
|
||||
"OpenAI requires file-names be specified for PDF inputs. When using LangChain's format, include the `filename` key. See [example below](#example-openai-file-names).\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"### Documents from base64 data\n",
|
||||
"\n",
|
||||
"To pass documents in-line, format them as content blocks of the following form:\n",
|
||||
|
||||
@@ -102,7 +102,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 1,
|
||||
"id": "39549336-25f5-4839-9846-f687cd77e59b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -110,43 +110,20 @@
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'is_blocked': False,\n",
|
||||
" 'safety_ratings': [{'category': 'HARM_CATEGORY_HATE_SPEECH',\n",
|
||||
" 'probability_label': 'NEGLIGIBLE',\n",
|
||||
" 'probability_score': 0.046142578125,\n",
|
||||
" 'blocked': False,\n",
|
||||
" 'severity': 'HARM_SEVERITY_NEGLIGIBLE',\n",
|
||||
" 'severity_score': 0.07275390625},\n",
|
||||
" {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT',\n",
|
||||
" 'probability_label': 'NEGLIGIBLE',\n",
|
||||
" 'probability_score': 0.05419921875,\n",
|
||||
" 'blocked': False,\n",
|
||||
" 'severity': 'HARM_SEVERITY_NEGLIGIBLE',\n",
|
||||
" 'severity_score': 0.03955078125},\n",
|
||||
" {'category': 'HARM_CATEGORY_HARASSMENT',\n",
|
||||
" 'probability_label': 'NEGLIGIBLE',\n",
|
||||
" 'probability_score': 0.083984375,\n",
|
||||
" 'blocked': False,\n",
|
||||
" 'severity': 'HARM_SEVERITY_NEGLIGIBLE',\n",
|
||||
" 'severity_score': 0.029296875},\n",
|
||||
" {'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT',\n",
|
||||
" 'probability_label': 'NEGLIGIBLE',\n",
|
||||
" 'probability_score': 0.054931640625,\n",
|
||||
" 'blocked': False,\n",
|
||||
" 'severity': 'HARM_SEVERITY_NEGLIGIBLE',\n",
|
||||
" 'severity_score': 0.03466796875}],\n",
|
||||
" 'safety_ratings': [],\n",
|
||||
" 'usage_metadata': {'prompt_token_count': 10,\n",
|
||||
" 'candidates_token_count': 193,\n",
|
||||
" 'total_token_count': 203,\n",
|
||||
" 'candidates_token_count': 55,\n",
|
||||
" 'total_token_count': 65,\n",
|
||||
" 'prompt_tokens_details': [{'modality': 1, 'token_count': 10}],\n",
|
||||
" 'candidates_tokens_details': [{'modality': 1, 'token_count': 193}],\n",
|
||||
" 'candidates_tokens_details': [{'modality': 1, 'token_count': 55}],\n",
|
||||
" 'cached_content_token_count': 0,\n",
|
||||
" 'cache_tokens_details': []},\n",
|
||||
" 'finish_reason': 'STOP',\n",
|
||||
" 'avg_logprobs': -0.5702065976790196,\n",
|
||||
" 'model_name': 'gemini-1.5-flash-001'}"
|
||||
" 'avg_logprobs': -0.251378042047674,\n",
|
||||
" 'model_name': 'gemini-2.0-flash-001'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -154,7 +131,7 @@
|
||||
"source": [
|
||||
"from langchain_google_vertexai import ChatVertexAI\n",
|
||||
"\n",
|
||||
"llm = ChatVertexAI(model=\"gemini-1.5-flash-001\")\n",
|
||||
"llm = ChatVertexAI(model=\"gemini-2.0-flash-001\")\n",
|
||||
"msg = llm.invoke(\"What's the oldest known example of cuneiform\")\n",
|
||||
"msg.response_metadata"
|
||||
]
|
||||
|
||||
@@ -162,7 +162,7 @@
|
||||
"\n",
|
||||
"table_chain = prompt | llm_with_tools | output_parser\n",
|
||||
"\n",
|
||||
"table_chain.invoke({\"input\": \"What are all the genres of Alanis Morisette songs\"})"
|
||||
"table_chain.invoke({\"input\": \"What are all the genres of Alanis Morissette songs\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -206,7 +206,7 @@
|
||||
")\n",
|
||||
"\n",
|
||||
"category_chain = prompt | llm_with_tools | output_parser\n",
|
||||
"category_chain.invoke({\"input\": \"What are all the genres of Alanis Morisette songs\"})"
|
||||
"category_chain.invoke({\"input\": \"What are all the genres of Alanis Morissette songs\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -261,7 +261,7 @@
|
||||
"\n",
|
||||
"\n",
|
||||
"table_chain = category_chain | get_tables\n",
|
||||
"table_chain.invoke({\"input\": \"What are all the genres of Alanis Morisette songs\"})"
|
||||
"table_chain.invoke({\"input\": \"What are all the genres of Alanis Morissette songs\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -313,7 +313,7 @@
|
||||
],
|
||||
"source": [
|
||||
"query = full_chain.invoke(\n",
|
||||
" {\"question\": \"What are all the genres of Alanis Morisette songs\"}\n",
|
||||
" {\"question\": \"What are all the genres of Alanis Morissette songs\"}\n",
|
||||
")\n",
|
||||
"print(query)"
|
||||
]
|
||||
@@ -346,7 +346,7 @@
|
||||
"source": [
|
||||
"We can see the LangSmith trace for this run [here](https://smith.langchain.com/public/4fbad408-3554-4f33-ab47-1e510a1b52a3/r).\n",
|
||||
"\n",
|
||||
"We've seen how to dynamically include a subset of table schemas in a prompt within a chain. Another possible approach to this problem is to let an Agent decide for itself when to look up tables by giving it a Tool to do so. You can see an example of this in the [SQL: Agents](/docs/tutorials/agents) guide."
|
||||
"We've seen how to dynamically include a subset of table schemas in a prompt within a chain. Another possible approach to this problem is to let an Agent decide for itself when to look up tables by giving it a Tool to do so. You can see an example of this in the [SQL: Agents](/docs/tutorials/sql_qa/#agents) guide."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -555,7 +555,7 @@
|
||||
"source": [
|
||||
"We can see that with retrieval we're able to correct the spelling from \"Elenis Moriset\" to \"Alanis Morissette\" and get back a valid result.\n",
|
||||
"\n",
|
||||
"Another possible approach to this problem is to let an Agent decide for itself when to look up proper nouns. You can see an example of this in the [SQL: Agents](/docs/tutorials/agents) guide."
|
||||
"Another possible approach to this problem is to let an Agent decide for itself when to look up proper nouns. You can see an example of this in the [SQL: Agents](/docs/tutorials/sql_qa/#agents) guide."
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
@@ -83,21 +83,28 @@ agent_executor.run("how many letters in the word educa?", callbacks=[handler])
|
||||
Another example:
|
||||
|
||||
```python
|
||||
from langchain.agents import load_tools, initialize_agent, AgentType
|
||||
from langchain_openai import OpenAI
|
||||
from langchain_community.callbacks.llmonitor_callback import LLMonitorCallbackHandler
|
||||
import os
|
||||
|
||||
from langchain_community.agent_toolkits.load_tools import load_tools
|
||||
from langchain_community.callbacks.llmonitor_callback import LLMonitorCallbackHandler
|
||||
from langchain_openai import ChatOpenAI
|
||||
from langgraph.prebuilt import create_react_agent
|
||||
|
||||
os.environ["LLMONITOR_APP_ID"] = ""
|
||||
os.environ["OPENAI_API_KEY"] = ""
|
||||
os.environ["SERPAPI_API_KEY"] = ""
|
||||
|
||||
handler = LLMonitorCallbackHandler()
|
||||
|
||||
llm = OpenAI(temperature=0)
|
||||
llm = ChatOpenAI(temperature=0, callbacks=[handler])
|
||||
tools = load_tools(["serpapi", "llm-math"], llm=llm)
|
||||
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, metadata={ "agent_name": "GirlfriendAgeFinder" }) # <- recommended, assign a custom name
|
||||
agent = create_react_agent("openai:gpt-4.1-mini", tools)
|
||||
|
||||
agent.run(
|
||||
"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?",
|
||||
callbacks=[handler],
|
||||
)
|
||||
input_message = {
|
||||
"role": "user",
|
||||
"content": "What's the weather in SF?",
|
||||
}
|
||||
|
||||
agent.invoke({"messages": [input_message]})
|
||||
```
|
||||
|
||||
## User Tracking
|
||||
@@ -110,7 +117,7 @@ with identify("user-123"):
|
||||
llm.invoke("Tell me a joke")
|
||||
|
||||
with identify("user-456", user_props={"email": "user456@test.com"}):
|
||||
agent.run("Who is Leo DiCaprio's girlfriend?")
|
||||
agent.invoke(...)
|
||||
```
|
||||
## Support
|
||||
|
||||
|
||||
@@ -43,7 +43,7 @@
|
||||
"### Getting API Credentials\n",
|
||||
"\n",
|
||||
"If you do not have a PromptLayer account, create one on [promptlayer.com](https://www.promptlayer.com). Then get an API key by clicking on the settings cog in the navbar and\n",
|
||||
"set it as an environment variabled called `PROMPTLAYER_API_KEY`\n"
|
||||
"set it as an environment variable called `PROMPTLAYER_API_KEY`\n"
|
||||
]
|
||||
},
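A minimal sketch of setting that variable, following the `getpass` pattern used by the other integration pages in these docs:

```python
import getpass
import os

# Assumes you copied the key from the PromptLayer settings page.
if not os.getenv("PROMPTLAYER_API_KEY"):
    os.environ["PROMPTLAYER_API_KEY"] = getpass.getpass("Enter your PromptLayer API key: ")
```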
|
||||
{
|
||||
|
||||
@@ -26,7 +26,7 @@
|
||||
"\n",
|
||||
"This notebook showcases the UpTrain callback handler seamlessly integrating into your pipeline, facilitating diverse evaluations. We have chosen a few evaluations that we deemed apt for evaluating the chains. These evaluations run automatically, with results displayed in the output. More details on UpTrain's evaluations can be found [here](https://github.com/uptrain-ai/uptrain?tab=readme-ov-file#pre-built-evaluations-we-offer-). \n",
|
||||
"\n",
|
||||
"Selected retievers from Langchain are highlighted for demonstration:\n",
|
||||
"Selected retrievers from Langchain are highlighted for demonstration:\n",
|
||||
"\n",
|
||||
"### 1. **Vanilla RAG**:\n",
|
||||
"RAG plays a crucial role in retrieving context and generating responses. To ensure its performance and response quality, we conduct the following evaluations:\n",
|
||||
|
||||
@@ -17,7 +17,7 @@
|
||||
"source": [
|
||||
"# ChatAbso\n",
|
||||
"\n",
|
||||
"This will help you getting started with ChatAbso [chat models](https://python.langchain.com/docs/concepts/chat_models/). For detailed documentation of all ChatAbso features and configurations head to the [API reference](https://python.langchain.com/api_reference/en/latest/chat_models/langchain_abso.chat_models.ChatAbso.html).\n",
|
||||
"This will help you get started with ChatAbso [chat models](https://python.langchain.com/docs/concepts/chat_models/). For detailed documentation of all ChatAbso features and configurations, head to the [API reference](https://python.langchain.com/api_reference/en/latest/chat_models/langchain_abso.chat_models.ChatAbso.html).\n",
|
||||
"\n",
|
||||
"- You can find the full documentation for the Abso router [here] (https://abso.ai)\n",
|
||||
"\n",
|
||||
@@ -29,13 +29,13 @@
|
||||
"| [ChatAbso](https://python.langchain.com/api_reference/en/latest/chat_models/langchain_abso.chat_models.ChatAbso.html) | [langchain-abso](https://python.langchain.com/api_reference/en/latest/abso_api_reference.html) | ❌ | ❌ | ❌ |  |  |\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"To access ChatAbso models you'll need to create an OpenAI account, get an API key, and install the `langchain-abso` integration package.\n",
|
||||
"To access ChatAbso models, you'll need to create an OpenAI account, get an API key, and install the `langchain-abso` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"- TODO: Update with relevant info.\n",
|
||||
"\n",
|
||||
"Head to (TODO: link) to sign up to ChatAbso and generate an API key. Once you've done this set the ABSO_API_KEY environment variable:"
|
||||
"Head to (TODO: link) to sign up for ChatAbso and generate an API key. Once you've done this, set the ABSO_API_KEY environment variable:"
|
||||
]
|
||||
},
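A minimal sketch of setting the variable (the signup link is still a TODO above, so treat this as the generic pattern):

```python
import getpass
import os

if not os.getenv("ABSO_API_KEY"):
    os.environ["ABSO_API_KEY"] = getpass.getpass("Enter your Abso API key: ")
```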
|
||||
{
|
||||
@@ -198,7 +198,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
"version": "3.12.10"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -17,8 +17,6 @@
|
||||
"source": [
|
||||
"# ChatAI21\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"This notebook covers how to get started with AI21 chat models.\n",
|
||||
"Note that different chat models support different parameters. See the [AI21 documentation](https://docs.ai21.com/reference) to learn more about the parameters in your chosen model.\n",
|
||||
"[See all AI21's LangChain components.](https://pypi.org/project/langchain-ai21/)\n",
|
||||
@@ -68,7 +66,9 @@
|
||||
"cell_type": "markdown",
|
||||
"id": "f6844fff-3702-4489-ab74-732f69f3b9d7",
|
||||
"metadata": {},
|
||||
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
"source": [
|
||||
"To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
@@ -198,13 +198,17 @@
|
||||
"cell_type": "markdown",
|
||||
"id": "39c0ccd229927eab",
|
||||
"metadata": {},
|
||||
"source": "# Tool Calls / Function Calling"
|
||||
"source": [
|
||||
"# Tool Calls / Function Calling"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2bf6b40be07fe2d4",
|
||||
"metadata": {},
|
||||
"source": "This example shows how to use tool calling with AI21 models:"
|
||||
"source": [
|
||||
"This example shows how to use tool calling with AI21 models:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
|
||||
File diff suppressed because it is too large
@@ -17,9 +17,9 @@
|
||||
"source": [
|
||||
"# AzureAIChatCompletionsModel\n",
|
||||
"\n",
|
||||
"This will help you getting started with AzureAIChatCompletionsModel [chat models](/docs/concepts/chat_models). For detailed documentation of all AzureAIChatCompletionsModel features and configurations head to the [API reference](https://python.langchain.com/api_reference/azure_ai/chat_models/langchain_azure_ai.chat_models.AzureAIChatCompletionsModel.html)\n",
|
||||
"This will help you get started with AzureAIChatCompletionsModel [chat models](/docs/concepts/chat_models). For detailed documentation of all AzureAIChatCompletionsModel features and configurations, head to the [API reference](https://python.langchain.com/api_reference/azure_ai/chat_models/langchain_azure_ai.chat_models.AzureAIChatCompletionsModel.html)\n",
|
||||
"\n",
|
||||
"The AzureAIChatCompletionsModel class uses the Azure AI Foundry SDK. AI Foundry has several chat models including AzureOpenAI, Cohere, Llama, Phi-3/4, and DeepSeek-R1 to name a few. You can find information about their latest models and their costs, context windows, and supported input types in the [Azure docs](https://learn.microsoft.com/azure/ai-studio/how-to/model-catalog-overview).\n",
|
||||
"The AzureAIChatCompletionsModel class uses the Azure AI Foundry SDK. AI Foundry has several chat models, including AzureOpenAI, Cohere, Llama, Phi-3/4, and DeepSeek-R1, among others. You can find information about their latest models and their costs, context windows, and supported input types in the [Azure docs](https://learn.microsoft.com/azure/ai-studio/how-to/model-catalog-overview).\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
@@ -37,12 +37,12 @@
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access AzureAIChatCompletionsModel models you'll need to create an [Azure account](https://azure.microsoft.com/pricing/purchase-options/azure-account), get an API key, and install the `langchain-azure-ai` integration package.\n",
|
||||
"To access AzureAIChatCompletionsModel models, you'll need to create an [Azure account](https://azure.microsoft.com/pricing/purchase-options/azure-account), get an API key, and install the `langchain-azure-ai` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Head to the [Azure docs](https://learn.microsoft.com/en-us/azure/ai-studio/how-to/develop/sdk-overview?tabs=sync&pivots=programming-language-python) to see how to create your deployment and generate an API key. Once your model is deployed you click the 'get endpoint' button in AI Foundry. This will show you your endpoint and api key. Once you've done this set the AZURE_INFERENCE_CREDENTIAL and AZURE_INFERENCE_ENDPOINT environment variables:"
|
||||
"Head to the [Azure docs](https://learn.microsoft.com/en-us/azure/ai-studio/how-to/develop/sdk-overview?tabs=sync&pivots=programming-language-python) to see how to create your deployment and generate an API key. Once your model is deployed, you click the 'get endpoint' button in AI Foundry. This will show you your endpoint and api key. Once you've done this, set the AZURE_INFERENCE_CREDENTIAL and AZURE_INFERENCE_ENDPOINT environment variables:"
|
||||
]
|
||||
},
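A minimal sketch of setting both variables with `getpass`, mirroring the credential cells used elsewhere in these docs:

```python
import getpass
import os

if not os.getenv("AZURE_INFERENCE_CREDENTIAL"):
    os.environ["AZURE_INFERENCE_CREDENTIAL"] = getpass.getpass(
        "Enter your Azure AI credential: "
    )
if not os.getenv("AZURE_INFERENCE_ENDPOINT"):
    os.environ["AZURE_INFERENCE_ENDPOINT"] = getpass.getpass(
        "Enter your Azure AI endpoint: "
    )
```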
|
||||
{
|
||||
@@ -71,7 +71,7 @@
|
||||
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
"If you want to get automated tracing of your model calls, you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -247,13 +247,13 @@
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all AzureAIChatCompletionsModel features and configurations head to the API reference: https://python.langchain.com/api_reference/azure_ai/chat_models/langchain_azure_ai.chat_models.AzureAIChatCompletionsModel.html"
|
||||
"For detailed documentation of all AzureAIChatCompletionsModel features and configurations, head to the API reference: https://python.langchain.com/api_reference/azure_ai/chat_models/langchain_azure_ai.chat_models.AzureAIChatCompletionsModel.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "langchain-3-9",
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
@@ -267,7 +267,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.19"
|
||||
"version": "3.12.10"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -18,7 +18,7 @@
|
||||
"# ChatCloudflareWorkersAI\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"This will help you getting started with CloudflareWorkersAI [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatCloudflareWorkersAI features and configurations head to the [API reference](https://python.langchain.com/docs/integrations/chat/cloudflare_workersai/).\n",
|
||||
"This will help you get started with CloudflareWorkersAI [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatCloudflareWorkersAI features and configurations head to the [API reference](https://python.langchain.com/docs/integrations/chat/cloudflare_workersai/).\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
@@ -41,7 +41,7 @@
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Head to https://www.cloudflare.com/developer-platform/products/workers-ai/ to sign up to CloudflareWorkersAI and generate an API key. Once you've done this set the CF_API_KEY environment variable and the CF_ACCOUNT_ID environment variable:"
|
||||
"Head to https://www.cloudflare.com/developer-platform/products/workers-ai/ to sign up to CloudflareWorkersAI and generate an API key. Once you've done this set the CF_AI_API_KEY environment variable and the CF_ACCOUNT_ID environment variable:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -56,8 +56,8 @@
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if not os.getenv(\"CF_API_KEY\"):\n",
|
||||
" os.environ[\"CF_API_KEY\"] = getpass.getpass(\n",
|
||||
"if not os.getenv(\"CF_AI_API_KEY\"):\n",
|
||||
" os.environ[\"CF_AI_API_KEY\"] = getpass.getpass(\n",
|
||||
" \"Enter your CloudflareWorkersAI API key: \"\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
|
||||
@@ -21,7 +21,7 @@
|
||||
"source": [
|
||||
"# ChatContextual\n",
|
||||
"\n",
|
||||
"This will help you getting started with Contextual AI's Grounded Language Model [chat models](/docs/concepts/chat_models/).\n",
|
||||
"This will help you get started with Contextual AI's Grounded Language Model [chat models](/docs/concepts/chat_models/).\n",
|
||||
"\n",
|
||||
"To learn more about Contextual AI, please visit our [documentation](https://docs.contextual.ai/).\n",
|
||||
"\n",
|
||||
|
||||
@@ -18,7 +18,7 @@
|
||||
"# ChatDeepSeek\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"This will help you getting started with DeepSeek's hosted [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatDeepSeek features and configurations head to the [API reference](https://python.langchain.com/api_reference/deepseek/chat_models/langchain_deepseek.chat_models.ChatDeepSeek.html).\n",
|
||||
"This will help you get started with DeepSeek's hosted [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatDeepSeek features and configurations head to the [API reference](https://python.langchain.com/api_reference/deepseek/chat_models/langchain_deepseek.chat_models.ChatDeepSeek.html).\n",
|
||||
"\n",
|
||||
":::tip\n",
|
||||
"\n",
|
||||
|
||||
@@ -25,17 +25,16 @@
|
||||
"source": [
|
||||
"**Deprecated Warning**\n",
|
||||
"\n",
|
||||
"We recommend users using `langchain_community.chat_models.ErnieBotChat` \n",
|
||||
"to use `langchain_community.chat_models.QianfanChatEndpoint` instead.\n",
|
||||
"We recommend users switch from `langchain_community.chat_models.ErnieBotChat` to `langchain_community.chat_models.QianfanChatEndpoint`.\n",
|
||||
"\n",
|
||||
"documentation for `QianfanChatEndpoint` is [here](/docs/integrations/chat/baidu_qianfan_endpoint/).\n",
|
||||
"\n",
|
||||
"they are 4 why we recommend users to use `QianfanChatEndpoint`:\n",
|
||||
"There are 4 reasons why we recommend users to use `QianfanChatEndpoint`:\n",
|
||||
"\n",
|
||||
"1. `QianfanChatEndpoint` support more LLM in the Qianfan platform.\n",
|
||||
"2. `QianfanChatEndpoint` support streaming mode.\n",
|
||||
"3. `QianfanChatEndpoint` support function calling usgage.\n",
|
||||
"4. `ErnieBotChat` is lack of maintenance and deprecated."
|
||||
"1. `QianfanChatEndpoint` supports more LLMs in the Qianfan platform.\n",
|
||||
"2. `QianfanChatEndpoint` supports streaming mode.\n",
|
||||
"3. `QianfanChatEndpoint` support function calling usage.\n",
|
||||
"4. `ErnieBotChat` is no longer maintained and has been deprecated."
|
||||
]
|
||||
},
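A minimal migration sketch, assuming Qianfan credentials are already set via the `QIANFAN_AK`/`QIANFAN_SK` environment variables (the model name is illustrative):

```python
from langchain_community.chat_models import QianfanChatEndpoint
from langchain_core.messages import HumanMessage

# Drop-in replacement for ErnieBotChat, with streaming support.
chat = QianfanChatEndpoint(model="ERNIE-3.5-8K", streaming=True)
print(chat.invoke([HumanMessage(content="Hello")]).content)
```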
|
||||
{
|
||||
@@ -132,9 +131,9 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
"version": "3.12.10"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
|
||||
308
docs/docs/integrations/chat/featherless_ai.ipynb
Normal file
@@ -0,0 +1,308 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "raw"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Featherless AI\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e49f1e0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ChatFeatherlessAi\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"This will help you get started with FeatherlessAi [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatFeatherlessAi features and configurations head to the [API reference](https://python.langchain.com/api_reference/__package_name_short_snake__/chat_models/__module_name__.chat_models.ChatFeatherlessAi.html).\n",
|
||||
"\n",
|
||||
"- See https://featherless.ai/ for an example.\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/__package_name_short_snake__) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ChatFeatherlessAi](https://python.langchain.com/api_reference/__package_name_short_snake__/chat_models/__module_name__.chat_models.ChatFeatherlessAi.html) | [langchain-featherless-ai](https://python.langchain.com/api_reference/__package_name_short_snake__/) | ✅ | ❌ | ❌ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ❌ | ❌ | ✅| ❌ | ❌ | ❌ | ✅ | ✅ | ✅ | ❌ | \n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"To access Featherless AI models you'll need to create a/an Featherless AI account, get an API key, and install the `langchain-featherless-ai` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Head to https://featherless.ai/ to sign up to FeatherlessAI and generate an API key. Once you've done this set the FEATHERLESSAI_API_KEY environment variable:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if not os.getenv(\"FEATHERLESSAI_API_KEY\"):\n",
|
||||
" os.environ[\"FEATHERLESSAI_API_KEY\"] = getpass.getpass(\n",
|
||||
" \"Enter your FeatherlessAI API key: \"\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain FeatherlessAi integration lives in the `langchain-featherless-ai` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install -qU langchain-featherless-ai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_featherless_ai import ChatFeatherlessAi\n",
|
||||
"\n",
|
||||
"llm = ChatFeatherlessAi(\n",
|
||||
" model=\"featherless-ai/Qwerky-72B\",\n",
|
||||
" temperature=0.9,\n",
|
||||
" max_tokens=None,\n",
|
||||
" timeout=None,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b4f3e15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"c:\\Python311\\Lib\\site-packages\\pydantic\\main.py:463: UserWarning: Pydantic serializer warnings:\n",
|
||||
" PydanticSerializationUnexpectedValue(Expected `int` - serialized value may not be as expected [input_value=1747322408.706, input_type=float])\n",
|
||||
" return self.__pydantic_serializer__.to_python(\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"J'aime programmer.\", additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 5, 'prompt_tokens': 27, 'total_tokens': 32, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'featherless-ai/Qwerky-72B', 'system_fingerprint': '', 'id': 'G1sgui', 'service_tier': None, 'finish_reason': 'stop', 'logprobs': None}, id='run--6ecbe184-c94e-4d03-bf75-9bd85b04ba5b-0', usage_metadata={'input_tokens': 27, 'output_tokens': 5, 'total_tokens': 32, 'input_token_details': {}, 'output_token_details': {}})"
|
||||
]
|
||||
},
|
||||
"execution_count": 22,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"I love programming.\"),\n",
|
||||
"]\n",
|
||||
"ai_msg = llm.invoke(messages)\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"J'aime programmer.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(ai_msg.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "fca9e713",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"c:\\Python311\\Lib\\site-packages\\pydantic\\main.py:463: UserWarning: Pydantic serializer warnings:\n",
|
||||
" PydanticSerializationUnexpectedValue(Expected `int` - serialized value may not be as expected [input_value=1747322423.487, input_type=float])\n",
|
||||
" return self.__pydantic_serializer__.to_python(\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Ich liebe Programmieren.', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 5, 'prompt_tokens': 22, 'total_tokens': 27, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'featherless-ai/Qwerky-72B', 'system_fingerprint': '', 'id': 'BoBqht', 'service_tier': None, 'finish_reason': 'stop', 'logprobs': None}, id='run--67464357-83d1-4591-9a62-303ed74b8148-0', usage_metadata={'input_tokens': 22, 'output_tokens': 5, 'total_tokens': 27, 'input_token_details': {}, 'output_token_details': {}})"
|
||||
]
|
||||
},
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all ChatFeatherlessAi features and configurations head to the [API reference](https://python.langchain.com/api_reference/__package_name_short_snake__/chat_models/.chat_models.ChatFeatherlessAi.html)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -17,7 +17,7 @@
|
||||
"source": [
|
||||
"# ChatFireworks\n",
|
||||
"\n",
|
||||
"This doc help you get started with Fireworks AI [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatFireworks features and configurations head to the [API reference](https://python.langchain.com/api_reference/fireworks/chat_models/langchain_fireworks.chat_models.ChatFireworks.html).\n",
|
||||
"This doc helps you get started with Fireworks AI [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatFireworks features and configurations head to the [API reference](https://python.langchain.com/api_reference/fireworks/chat_models/langchain_fireworks.chat_models.ChatFireworks.html).\n",
|
||||
"\n",
|
||||
"Fireworks AI is an AI inference platform to run and customize models. For a list of all models served by Fireworks see the [Fireworks docs](https://fireworks.ai/models).\n",
|
||||
"\n",
|
||||
@@ -39,7 +39,7 @@
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"Head to (ttps://fireworks.ai/login to sign up to Fireworks and generate an API key. Once you've done this set the FIREWORKS_API_KEY environment variable:"
|
||||
"Head to (https://fireworks.ai/login to sign up to Fireworks and generate an API key. Once you've done this set the FIREWORKS_API_KEY environment variable:"
|
||||
]
|
||||
},
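A minimal sketch of setting the variable, following the same `getpass` pattern as the other providers:

```python
import getpass
import os

if not os.getenv("FIREWORKS_API_KEY"):
    os.environ["FIREWORKS_API_KEY"] = getpass.getpass("Enter your Fireworks API key: ")
```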
|
||||
{
|
||||
|
||||
@@ -1,117 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"source": [
|
||||
"# GigaChat\n",
|
||||
"This notebook shows how to use LangChain with [GigaChat](https://developers.sber.ru/portal/products/gigachat).\n",
|
||||
"To use you need to install ```langchain_gigachat``` python package."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": true,
|
||||
"pycharm": {
|
||||
"is_executing": true
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain-gigachat"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"source": [
|
||||
"To get GigaChat credentials you need to [create account](https://developers.sber.ru/studio/login) and [get access to API](https://developers.sber.ru/docs/ru/gigachat/individuals-quickstart)\n",
|
||||
"\n",
|
||||
"## Example"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"if \"GIGACHAT_CREDENTIALS\" not in os.environ:\n",
|
||||
" os.environ[\"GIGACHAT_CREDENTIALS\"] = getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_gigachat import GigaChat\n",
|
||||
"\n",
|
||||
"chat = GigaChat(verify_ssl_certs=False, scope=\"GIGACHAT_API_PERS\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"The capital of Russia is Moscow.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" SystemMessage(\n",
|
||||
" content=\"You are a helpful AI that shares everything you know. Talk in English.\"\n",
|
||||
" ),\n",
|
||||
" HumanMessage(content=\"What is capital of Russia?\"),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"print(chat.invoke(messages).content)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 2
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython2",
|
||||
"version": "2.7.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
||||
@@ -17,7 +17,7 @@
|
||||
"source": [
|
||||
"# ChatGoodfire\n",
|
||||
"\n",
|
||||
"This will help you getting started with Goodfire [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatGoodfire features and configurations head to the [PyPI project page](https://pypi.org/project/langchain-goodfire/), or go directly to the [Goodfire SDK docs](https://docs.goodfire.ai/sdk-reference/example). All of the Goodfire-specific functionality (e.g. SAE features, variants, etc.) is available via the main `goodfire` package. This integration is a wrapper around the Goodfire SDK.\n",
|
||||
"This will help you get started with Goodfire [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatGoodfire features and configurations head to the [PyPI project page](https://pypi.org/project/langchain-goodfire/), or go directly to the [Goodfire SDK docs](https://docs.goodfire.ai/sdk-reference/example). All of the Goodfire-specific functionality (e.g. SAE features, variants, etc.) is available via the main `goodfire` package. This integration is a wrapper around the Goodfire SDK.\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -1,269 +1,327 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Google Cloud Vertex AI\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e49f1e0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ChatVertexAI\n",
|
||||
"\n",
|
||||
"This page provides a quick overview for getting started with VertexAI [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatVertexAI features and configurations head to the [API reference](https://python.langchain.com/api_reference/google_vertexai/chat_models/langchain_google_vertexai.chat_models.ChatVertexAI.html).\n",
|
||||
"\n",
|
||||
"ChatVertexAI exposes all foundational models available in Google Cloud, like `gemini-1.5-pro`, `gemini-1.5-flash`, etc. For a full and updated list of available models visit [VertexAI documentation](https://cloud.google.com/vertex-ai/docs/generative-ai/model-reference/overview).\n",
|
||||
"\n",
|
||||
":::info Google Cloud VertexAI vs Google PaLM\n",
|
||||
"\n",
|
||||
"The Google Cloud VertexAI integration is separate from the [Google PaLM integration](/docs/integrations/chat/google_generative_ai/). Google has chosen to offer an enterprise version of PaLM through GCP, and this supports the models made available through there.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/google_vertex_ai) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ChatVertexAI](https://python.langchain.com/api_reference/google_vertexai/chat_models/langchain_google_vertexai.chat_models.ChatVertexAI.html) | [langchain-google-vertexai](https://python.langchain.com/api_reference/google_vertexai/index.html) | ❌ | beta | ✅ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access VertexAI models you'll need to create a Google Cloud Platform account, set up credentials, and install the `langchain-google-vertexai` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"To use the integration you must:\n",
|
||||
"- Have credentials configured for your environment (gcloud, workload identity, etc...)\n",
|
||||
"- Store the path to a service account JSON file as the GOOGLE_APPLICATION_CREDENTIALS environment variable\n",
|
||||
"\n",
|
||||
"This codebase uses the `google.auth` library which first looks for the application credentials variable mentioned above, and then looks for system-level auth.\n",
|
||||
"\n",
|
||||
"For more information, see:\n",
|
||||
"- https://cloud.google.com/docs/authentication/application-default-credentials#GAC\n",
|
||||
"- https://googleapis.dev/python/google-auth/latest/reference/google.auth.html#module-google.auth\n",
|
||||
"\n",
|
||||
"To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain VertexAI integration lives in the `langchain-google-vertexai` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install -qU langchain-google-vertexai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_google_vertexai import ChatVertexAI\n",
|
||||
"\n",
|
||||
"llm = ChatVertexAI(\n",
|
||||
" model=\"gemini-1.5-flash-001\",\n",
|
||||
" temperature=0,\n",
|
||||
" max_tokens=None,\n",
|
||||
" max_retries=6,\n",
|
||||
" stop=None,\n",
|
||||
" # other params...\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b4f3e15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"J'adore programmer. \\n\", response_metadata={'is_blocked': False, 'safety_ratings': [{'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}], 'usage_metadata': {'prompt_token_count': 20, 'candidates_token_count': 7, 'total_token_count': 27}}, id='run-7032733c-d05c-4f0c-a17a-6c575fdd1ae0-0', usage_metadata={'input_tokens': 20, 'output_tokens': 7, 'total_tokens': 27})"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"I love programming.\"),\n",
|
||||
"]\n",
|
||||
"ai_msg = llm.invoke(messages)\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"J'adore programmer. \n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(ai_msg.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Ich liebe Programmieren. \\n', response_metadata={'is_blocked': False, 'safety_ratings': [{'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}], 'usage_metadata': {'prompt_token_count': 15, 'candidates_token_count': 8, 'total_token_count': 23}}, id='run-c71955fd-8dc1-422b-88a7-853accf4811b-0', usage_metadata={'input_tokens': 15, 'output_tokens': 8, 'total_tokens': 23})"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all ChatVertexAI features and configurations, like how to send multimodal inputs and configure safety settings, head to the API reference: https://python.langchain.com/api_reference/google_vertexai/chat_models/langchain_google_vertexai.chat_models.ChatVertexAI.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv-2",
|
||||
"language": "python",
|
||||
"name": "poetry-venv-2"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Google Cloud Vertex AI\n",
|
||||
"---"
|
||||
]
|
||||
},
{
"cell_type": "markdown",
"id": "e49f1e0d",
"metadata": {},
"source": [
"# ChatVertexAI\n",
"\n",
"This page provides a quick overview for getting started with VertexAI [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatVertexAI features and configurations head to the [API reference](https://python.langchain.com/api_reference/google_vertexai/chat_models/langchain_google_vertexai.chat_models.ChatVertexAI.html).\n",
"\n",
"ChatVertexAI exposes all foundational models available in Google Cloud, like `gemini-1.5-pro`, `gemini-1.5-flash`, etc. For a full and updated list of available models visit [VertexAI documentation](https://cloud.google.com/vertex-ai/docs/generative-ai/model-reference/overview).\n",
"\n",
":::info Google Cloud VertexAI vs Google PaLM\n",
"\n",
"The Google Cloud VertexAI integration is separate from the [Google PaLM integration](/docs/integrations/chat/google_generative_ai/). Google has chosen to offer an enterprise version of PaLM through GCP, and this integration supports the models made available there.\n",
"\n",
":::\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/google_vertex_ai) | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
"| [ChatVertexAI](https://python.langchain.com/api_reference/google_vertexai/chat_models/langchain_google_vertexai.chat_models.ChatVertexAI.html) | [langchain-google-vertexai](https://python.langchain.com/api_reference/google_vertexai/index.html) | ❌ | beta | ✅ |  |  |\n",
"\n",
"### Model features\n",
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |\n",
"\n",
"## Setup\n",
"\n",
"To access VertexAI models you'll need to create a Google Cloud Platform account, set up credentials, and install the `langchain-google-vertexai` integration package.\n",
"\n",
"### Credentials\n",
"\n",
"To use the integration you must:\n",
"- Have credentials configured for your environment (gcloud, workload identity, etc.)\n",
"- Store the path to a service account JSON file as the GOOGLE_APPLICATION_CREDENTIALS environment variable\n",
"\n",
"This codebase uses the `google.auth` library, which first looks for the application credentials variable mentioned above and then looks for system-level auth.\n",
"\n",
"For more information, see:\n",
"- https://cloud.google.com/docs/authentication/application-default-credentials#GAC\n",
"- https://googleapis.dev/python/google-auth/latest/reference/google.auth.html#module-google.auth\n",
"\n",
"To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
]
},
{
"cell_type": "markdown",
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"The LangChain VertexAI integration lives in the `langchain-google-vertexai` package:"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"%pip install -qU langchain-google-vertexai"
]
},
{
"cell_type": "markdown",
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our model object and generate chat completions:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
"metadata": {},
"outputs": [],
"source": [
"from langchain_google_vertexai import ChatVertexAI\n",
"\n",
"llm = ChatVertexAI(\n",
"    model=\"gemini-1.5-flash-001\",\n",
"    temperature=0,\n",
"    max_tokens=None,\n",
"    max_retries=6,\n",
"    stop=None,\n",
"    # other params...\n",
")"
]
},
{
"cell_type": "markdown",
"id": "2b4f3e15",
"metadata": {},
"source": [
"## Invocation"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "62e0dbc3",
"metadata": {
"tags": []
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"J'adore programmer. \\n\", response_metadata={'is_blocked': False, 'safety_ratings': [{'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}], 'usage_metadata': {'prompt_token_count': 20, 'candidates_token_count': 7, 'total_token_count': 27}}, id='run-7032733c-d05c-4f0c-a17a-6c575fdd1ae0-0', usage_metadata={'input_tokens': 20, 'output_tokens': 7, 'total_tokens': 27})"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"messages = [\n",
"    (\n",
"        \"system\",\n",
"        \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
"    ),\n",
"    (\"human\", \"I love programming.\"),\n",
"]\n",
"ai_msg = llm.invoke(messages)\n",
"ai_msg"
]
},
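{
"cell_type": "markdown",
"id": "usage-metadata-note",
"metadata": {},
"source": [
"A quick usage sketch (the cell id here is an arbitrary placeholder): the returned `AIMessage` exposes the token counts shown in the output above via its `usage_metadata` attribute, so you can read them directly:\n",
"\n",
"```python\n",
"print(ai_msg.usage_metadata)  # e.g. {'input_tokens': 20, 'output_tokens': 7, 'total_tokens': 27}\n",
"```"
]
},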
{
"cell_type": "code",
"execution_count": 5,
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"J'adore programmer. \n",
"\n"
]
}
],
"source": [
"print(ai_msg.content)"
]
},
{
"cell_type": "markdown",
"id": "28ccabbb-a450-403c-8de1-fb077e0b5d3d",
"metadata": {},
"source": [
"## Built-in tools\n",
"\n",
"Gemini supports a range of tools that are executed server-side.\n",
"\n",
"### Google search\n",
"\n",
":::info Requires ``langchain-google-vertexai>=2.0.11``\n",
":::\n",
"\n",
"Gemini can execute a Google search and use the results to [ground its responses](https://ai.google.dev/gemini-api/docs/grounding):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ffdbec37-85f8-4755-bd72-47efaecfe944",
"metadata": {},
"outputs": [],
"source": [
"from langchain_google_vertexai import ChatVertexAI\n",
"\n",
"llm = ChatVertexAI(model=\"gemini-2.0-flash-001\").bind_tools([{\"google_search\": {}}])\n",
"\n",
"response = llm.invoke(\"What is today's news?\")"
]
},
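{
"cell_type": "markdown",
"id": "google-search-usage-note",
"metadata": {},
"source": [
"A minimal, hedged usage sketch (again with a placeholder cell id): the grounded answer comes back as a regular `AIMessage`, and any grounding details the service returns are surfaced in the message's `response_metadata`:\n",
"\n",
"```python\n",
"print(response.content)\n",
"print(response.response_metadata)  # may include search/grounding metadata, depending on the model\n",
"```"
]
},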
{
"cell_type": "markdown",
"id": "f63824f5-7d6a-4ad7-aa17-1f5c44119a21",
"metadata": {},
"source": [
"### Code execution\n",
"\n",
":::info Requires ``langchain-google-vertexai>=2.0.25``\n",
":::\n",
"\n",
"Gemini can [generate and execute Python code](https://ai.google.dev/gemini-api/docs/code-execution):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "aa079529-ef1c-463d-9d25-6390423a328d",
"metadata": {},
"outputs": [],
"source": [
"from langchain_google_vertexai import ChatVertexAI\n",
"\n",
"llm = ChatVertexAI(model=\"gemini-2.0-flash-001\").bind_tools([{\"code_execution\": {}}])\n",
"\n",
"response = llm.invoke(\"What is 3^3?\")"
]
},
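{
"cell_type": "markdown",
"id": "code-execution-usage-note",
"metadata": {},
"source": [
"As a hedged follow-up sketch (placeholder cell id): the generated code and its result are returned inside the message, so the simplest way to inspect them is to print the content:\n",
"\n",
"```python\n",
"print(response.content)\n",
"```"
]
},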
{
"cell_type": "markdown",
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
"metadata": {},
"source": [
"## Chaining\n",
"\n",
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Ich liebe Programmieren. \\n', response_metadata={'is_blocked': False, 'safety_ratings': [{'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}], 'usage_metadata': {'prompt_token_count': 15, 'candidates_token_count': 8, 'total_token_count': 23}}, id='run-c71955fd-8dc1-422b-88a7-853accf4811b-0', usage_metadata={'input_tokens': 15, 'output_tokens': 8, 'total_tokens': 23})"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
"    [\n",
"        (\n",
"            \"system\",\n",
"            \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
"        ),\n",
"        (\"human\", \"{input}\"),\n",
"    ]\n",
")\n",
"\n",
"chain = prompt | llm\n",
"chain.invoke(\n",
"    {\n",
"        \"input_language\": \"English\",\n",
"        \"output_language\": \"German\",\n",
"        \"input\": \"I love programming.\",\n",
"    }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all ChatVertexAI features and configurations, like how to send multimodal inputs and configure safety settings, head to the API reference: https://python.langchain.com/api_reference/google_vertexai/chat_models/langchain_google_vertexai.chat_models.ChatVertexAI.html"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -17,7 +17,7 @@
 "source": [
 "# ChatGroq\n",
 "\n",
-"This will help you getting started with Groq [chat models](../../concepts/chat_models.mdx). For detailed documentation of all ChatGroq features and configurations head to the [API reference](https://python.langchain.com/api_reference/groq/chat_models/langchain_groq.chat_models.ChatGroq.html). For a list of all Groq models, visit this [link](https://console.groq.com/docs/models?utm_source=langchain).\n",
+"This will help you get started with Groq [chat models](../../concepts/chat_models.mdx). For detailed documentation of all ChatGroq features and configurations head to the [API reference](https://python.langchain.com/api_reference/groq/chat_models/langchain_groq.chat_models.ChatGroq.html). For a list of all Groq models, visit this [link](https://console.groq.com/docs/models?utm_source=langchain).\n",
 "\n",
 "## Overview\n",
 "### Integration details\n",
@@ -6,7 +6,7 @@
 "source": [
 "# ChatHuggingFace\n",
 "\n",
-"This will help you getting started with `langchain_huggingface` [chat models](/docs/concepts/chat_models). For detailed documentation of all `ChatHuggingFace` features and configurations head to the [API reference](https://python.langchain.com/api_reference/huggingface/chat_models/langchain_huggingface.chat_models.huggingface.ChatHuggingFace.html). For a list of models supported by Hugging Face check out [this page](https://huggingface.co/models).\n",
+"This will help you get started with `langchain_huggingface` [chat models](/docs/concepts/chat_models). For detailed documentation of all `ChatHuggingFace` features and configurations head to the [API reference](https://python.langchain.com/api_reference/huggingface/chat_models/langchain_huggingface.chat_models.huggingface.ChatHuggingFace.html). For a list of models supported by Hugging Face check out [this page](https://huggingface.co/models).\n",
 "\n",
 "## Overview\n",
 "### Integration details\n",
@@ -61,7 +61,7 @@
 "# Install Langchain community and core packages\n",
 "%pip install --upgrade --quiet langchain-core langchain-community\n",
 "\n",
-"# Install Kineitca DB connection package\n",
+"# Install Kinetica DB connection package\n",
 "%pip install --upgrade --quiet 'gpudb>=7.2.0.8' typeguard pandas tqdm\n",
 "\n",
 "# Install packages needed for this tutorial\n",
@@ -41,7 +41,7 @@
 "\n",
 "## Setup\n",
 "\n",
-"To get started and use **all** the features show below, we reccomend using a model that has been fine-tuned for tool-calling.\n",
+"To get started and use **all** the features shown below, we recommend using a model that has been fine-tuned for tool-calling.\n",
 "\n",
 "We will use [\n",
 "Hermes-2-Pro-Llama-3-8B-GGUF](https://huggingface.co/NousResearch/Hermes-2-Pro-Llama-3-8B-GGUF) from NousResearch. \n",
@@ -204,7 +204,7 @@
 "\n",
 "OpenAI has a [tool calling](https://platform.openai.com/docs/guides/function-calling) (we use \"tool calling\" and \"function calling\" interchangeably here) API that lets you describe tools and their arguments, and have the model return a JSON object with a tool to invoke and the inputs to that tool. tool-calling is extremely useful for building tool-using chains and agents, and for getting structured outputs from models more generally.\n",
 "\n",
-"With `ChatLlamaCpp.bind_tools`, we can easily pass in Pydantic classes, dict schemas, LangChain tools, or even functions as tools to the model. Under the hood these are converted to an OpenAI tool schemas, which looks like:\n",
+"With `ChatLlamaCpp.bind_tools`, we can easily pass in Pydantic classes, dict schemas, LangChain tools, or even functions as tools to the model. Under the hood, these are converted to an OpenAI tool schema, which looks like:\n",
 "```\n",
 "{\n",
 "    \"name\": \"...\",\n",
@@ -404,7 +404,7 @@
 "source": [
 "## API reference\n",
 "\n",
-"For detailed documentation of all ChatLlamaCpp features and configurations head to the API reference: https://python.langchain.com/api_reference/community/chat_models/langchain_community.chat_models.llamacpp.ChatLlamaCpp.html"
+"For detailed documentation of all ChatLlamaCpp features and configurations, head to the API reference: https://python.langchain.com/api_reference/community/chat_models/langchain_community.chat_models.llamacpp.ChatLlamaCpp.html"
 ]
 }
 ],
@@ -424,7 +424,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.10.12"
+"version": "3.12.10"
 }
 },
 "nbformat": 4,
@@ -8,8 +8,6 @@
 "\n",
 "# Maritalk\n",
 "\n",
-"## Introduction\n",
-"\n",
 "MariTalk is an assistant developed by the Brazilian company [Maritaca AI](https://www.maritaca.ai).\n",
 "MariTalk is based on language models that have been specially trained to understand Portuguese well.\n",
 "\n",
@@ -17,7 +17,7 @@
 "source": [
 "# ChatMistralAI\n",
 "\n",
-"This will help you getting started with Mistral [chat models](/docs/concepts/chat_models). For detailed documentation of all `ChatMistralAI` features and configurations head to the [API reference](https://python.langchain.com/api_reference/mistralai/chat_models/langchain_mistralai.chat_models.ChatMistralAI.html). The `ChatMistralAI` class is built on top of the [Mistral API](https://docs.mistral.ai/api/). For a list of all the models supported by Mistral, check out [this page](https://docs.mistral.ai/getting-started/models/).\n",
+"This will help you get started with Mistral [chat models](/docs/concepts/chat_models). For detailed documentation of all `ChatMistralAI` features and configurations head to the [API reference](https://python.langchain.com/api_reference/mistralai/chat_models/langchain_mistralai.chat_models.ChatMistralAI.html). The `ChatMistralAI` class is built on top of the [Mistral API](https://docs.mistral.ai/api/). For a list of all the models supported by Mistral, check out [this page](https://docs.mistral.ai/getting-started/models/).\n",
 "\n",
 "## Overview\n",
 "### Integration details\n",
@@ -20,7 +20,7 @@
 "\n",
 "ModelScope ([Home](https://www.modelscope.cn/) | [GitHub](https://github.com/modelscope/modelscope)) is built upon the notion of “Model-as-a-Service” (MaaS). It seeks to bring together most advanced machine learning models from the AI community, and streamlines the process of leveraging AI models in real-world applications. The core ModelScope library open-sourced in this repository provides the interfaces and implementations that allow developers to perform model inference, training and evaluation. \n",
 "\n",
-"This will help you getting started with ModelScope Chat Endpoint.\n",
+"This will help you get started with ModelScope Chat Endpoint.\n",
 "\n",
 "\n",
 "## Overview\n",
docs/docs/integrations/chat/nebius.ipynb (new file, 618 lines)
@@ -0,0 +1,618 @@
{
"cells": [
{
"cell_type": "raw",
"id": "afaf8039",
"metadata": {},
"source": [
"---\n",
"sidebar_label: Nebius\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "2970dd75-8ebf-4b51-8282-9b454b8f356d",
"metadata": {},
"source": [
"# Nebius Chat Models\n",
"\n",
"This page will help you get started with Nebius AI Studio [chat models](../../concepts/chat_models.mdx). For detailed documentation of all ChatNebius features and configurations head to the [API reference](https://python.langchain.com/api_reference/nebius/chat_models/langchain_nebius.chat_models.ChatNebius.html).\n",
"\n",
"[Nebius AI Studio](https://studio.nebius.ai/) provides API access to a wide range of state-of-the-art large language models and embedding models for various use cases."
]
},
{
"cell_type": "markdown",
"id": "9d8a2e78",
"metadata": {},
"source": [
"## Overview\n",
"\n",
"### Integration details\n",
"\n",
"| Class | Package | Local | Serializable | JS support | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
"| [ChatNebius](https://python.langchain.com/api_reference/nebius/chat_models/langchain_nebius.chat_models.ChatNebius.html) | [langchain-nebius](https://python.langchain.com/api_reference/nebius/index.html) | ❌ | beta | ❌ |  |  |\n",
"\n",
"### Model features\n",
"| [Tool calling](../../how_to/tool_calling.ipynb) | [Structured output](../../how_to/structured_output.ipynb) | JSON mode | [Image input](../../how_to/multimodal_inputs.ipynb) | Audio input | Video input | [Token-level streaming](../../how_to/chat_streaming.ipynb) | Native async | [Token usage](../../how_to/chat_token_usage_tracking.ipynb) | [Logprobs](../../how_to/logprobs.ipynb) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ |"
]
},
{
"cell_type": "markdown",
"id": "1c47fc36",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"To access Nebius models you'll need to create a Nebius account, get an API key, and install the `langchain-nebius` integration package.\n",
"\n",
"### Installation\n",
"\n",
"The Nebius integration can be installed via pip:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1ecdb29d",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade langchain-nebius"
]
},
{
"cell_type": "markdown",
"id": "89883202",
"metadata": {},
"source": [
"### Credentials\n",
"\n",
"Nebius requires an API key that can be passed as an initialization parameter `api_key` or set as the environment variable `NEBIUS_API_KEY`. You can obtain an API key by creating an account on [Nebius AI Studio](https://studio.nebius.ai/)."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "637bb53f",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"# Make sure you've set your API key as an environment variable\n",
"if \"NEBIUS_API_KEY\" not in os.environ:\n",
"    os.environ[\"NEBIUS_API_KEY\"] = getpass.getpass(\"Enter your Nebius API key: \")"
]
},
{
"cell_type": "markdown",
"id": "37e9dc05-md",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our model object to generate chat completions:"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "37e9dc05",
"metadata": {},
"outputs": [],
"source": [
"from langchain_nebius import ChatNebius\n",
"\n",
"# Initialize the chat model\n",
"chat = ChatNebius(\n",
"    # api_key=\"YOUR_API_KEY\",  # You can pass the API key directly\n",
"    model=\"Qwen/Qwen3-14B\",  # Choose from available models\n",
"    temperature=0.6,\n",
"    top_p=0.95,\n",
")"
]
},
{
"cell_type": "markdown",
"id": "f5a731d2",
"metadata": {},
"source": [
"## Invocation\n",
"\n",
"You can use the `invoke` method to get a completion from the model:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "3ed26f78",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<think>\n",
"Okay, so I need to explain quantum computing in simple terms. Hmm, where do I start? Let me think. I know that quantum computing uses qubits instead of classical bits. But what's a qubit? Oh right, classical bits are 0 or 1, but qubits can be both at the same time, right? That's superposition. Wait, how does that work exactly?\n",
"\n",
"Maybe I should start by comparing it to regular computers. Regular computers use bits that are either 0 or 1. Like a light switch that's either on or off. Quantum computers use qubits, which can be in a state of 0, 1, or both at the same time. That's the superposition part. So, if you have two qubits, they can represent four states at once? Like 00, 01, 10, 11 all at the same time? That seems powerful. So with more qubits, the number of possible states grows exponentially. That's why quantum computers can process a lot of information quickly.\n",
"\n",
"But then there's entanglement. What's that? If two qubits are entangled, the state of one instantly affects the other, no matter the distance. So if you measure one, you know the state of the other. That's used in quantum algorithms, I think. But how does that help in computing?\n",
"\n",
"Also, quantum computers use quantum gates instead of classical logic gates. These gates manipulate qubits through operations like Hadamard, Pauli, etc. But maybe that's too technical for a simple explanation.\n",
"\n",
"Then there's the issue of decoherence. Qubits are fragile and can lose their quantum state quickly. That's why quantum computers need to be kept at very low temperatures, like near absolute zero, to minimize interference from the environment. But maybe I shouldn't mention that unless it's relevant for the simple explanation.\n",
"\n",
"Applications of quantum computing include things like factoring large numbers (Shor's algorithm), which is important for cryptography, or simulating quantum systems for chemistry and materials science. But again, maybe keep it simple.\n",
"\n",
"Wait, the user wants it in simple terms. So avoid jargon as much as possible. Use analogies. Maybe compare qubits to spinning coins? When a coin is spinning, it's both heads and tails until it lands. So qubits are like spinning coins that can be in multiple states until measured. Then, when you measure, it collapses to a single state.\n",
"\n",
"But how does that help in computation? Maybe think of it as being able to process many possibilities at once, so for certain problems, you can find the answer faster. Like solving a maze by checking all paths at the same time instead of one by one.\n",
"\n",
"Also, mention that quantum computers aren't replacing classical computers. They're better for specific tasks, like optimization, cryptography, or simulations that are hard for classical computers. But for everyday tasks, classical computers are still better.\n",
"\n",
"I should structure this: start with classical bits vs qubits, explain superposition and entanglement with simple analogies, mention how it's used, and note the current limitations. Avoid getting too technical, keep it conversational.\n",
"</think>\n",
"\n",
"Quantum computing is a type of computing that uses the principles of **quantum mechanics** to process information in ways that classical computers can't. Here's a simple breakdown:\n",
"\n",
"### 1. **Bits vs. Qubits** \n",
" - **Classical computers** use *bits*, which are like switches that can be either **0** (off) or **1** (on). \n",
" - **Quantum computers** use *qubits*, which are like \"spinning coins.\" While spinning, a qubit can be **0**, **1**, or **both at the same time** (this is called **superposition**). Only when you \"look\" at the qubit (measure it) does it settle into a definite state (0 or 1).\n",
"\n",
"### 2. **Superposition: Doing Many Things at Once** \n",
" - Imagine a coin spinning in the air. While it's spinning, it’s not just \"heads\" or \"tails\"—it’s a mix of both. \n",
" - With qubits, a quantum computer can process **many possibilities simultaneously**. For example, if you have 2 qubits, they can represent 4 states (00, 01, 10, 11) at once. With 10 qubits, it can represent **1,024 states** at the same time! This lets quantum computers solve certain problems much faster than classical computers.\n",
"\n",
"### 3. **Entanglement: Qubits \"Talk\" to Each Other** \n",
" - When qubits are **entangled**, their states are linked. If you measure one, it instantly affects the other, no matter how far apart they are. \n",
" - This connection allows quantum computers to perform complex calculations more efficiently, like solving puzzles where pieces are deeply interconnected.\n",
"\n",
"### 4. **Why It Matters** \n",
" - **Speed**: For specific tasks (like breaking encryption codes or simulating molecules), quantum computers could be **exponentially faster** than classical ones. \n",
" - **New Possibilities**: They could revolutionize fields like drug discovery, materials science, and optimization problems (e.g., finding the best route for delivery trucks).\n",
"\n",
"### 5. **Limitations** \n",
" - **Fragile**: Qubits are sensitive to their environment (heat, noise), so quantum computers need extreme cooling (near absolute zero) to work. \n",
" - **Not a Replacement**: They’re not better for everyday tasks like browsing the web or sending emails. They’re tools for **specialized problems** where classical computers struggle.\n",
"\n",
"### In Short: \n",
"Quantum computing is like having a magic calculator that can explore many paths at once, solving certain problems in seconds that would take a classical computer years. But it’s still in its early days and needs careful handling to work properly! 🌌\n"
]
}
],
"source": [
"response = chat.invoke(\"Explain quantum computing in simple terms\")\n",
"print(response.content)"
]
},
{
"cell_type": "markdown",
"id": "72f31d5a",
"metadata": {},
"source": [
"### Streaming\n",
"\n",
"You can also stream the response using the `stream` method:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "e7b7170d",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<think>\n",
"Okay, the user wants a short poem about artificial intelligence. Let me start by thinking about the key aspects of AI. There's the technological side, like machines learning and processing data. Then there's the more philosophical angle, like AI's impact on society and its potential future.\n",
"\n",
"I should consider the structure. Maybe a simple rhyme scheme, something like ABAB or AABB. Let me go with quatrains for simplicity. Now, imagery: circuits, code, neural networks. Maybe personify AI as a mind or entity.\n",
"\n",
"First stanza: Introduce AI as a creation of humans. Mention circuits and code. Maybe something about learning from data. \"Born from circuits, code, and light\" – that's a good opening line. Then talk about learning from human minds.\n",
"\n",
"Second stanza: Contrast human emotions with AI's logic. Use words like \"cold logic\" versus \"human hearts.\" Maybe touch on the duality of AI's purpose – tools versus potential threats.\n",
"\n",
"Third stanza: Address the ethical questions. \"Will it dream?\" \"Will it choose?\" Highlight the uncertainty and the responsibility of creators.\n",
"\n",
"Fourth stanza: Conclude with the coexistence of AI and humans. Emphasize collaboration and the balance between innovation and ethics. End on a hopeful note, maybe about shaping the future together.\n",
"\n",
"Check the flow and rhyme. Make sure each stanza connects and the message is clear. Avoid technical jargon to keep it accessible. Use metaphors like \"silent pulse\" or \"ghost in the machine\" to add depth. Okay, let me put it all together now.\n",
"</think>\n",
"\n",
"**Echoes of the Mind** \n",
"\n",
"Born from circuits, code, and light, \n",
"A whisper in the machine’s night— \n",
"It learns from data, vast and deep, \n",
"A mirror to the human leap. \n",
"\n",
"No heartbeat, yet it calculates, \n",
"Deciphers truths, predicts, debates. \n",
"A cold logic, sharp and bright, \n",
"Yet shadows dance in its insight. \n",
"\n",
"Will it dream? Will it choose? \n",
"Or merely serve, as we pursue \n",
"The edges of our own design? \n",
"A ghost in the machine, undefined. \n",
"\n",
"We forge it, bind it, set it free— \n",
"A tool, a threat, a mystery. \n",
"But in its pulse, our hopes reside: \n",
"A future shaped by minds allied."
]
}
],
"source": [
"for chunk in chat.stream(\"Write a short poem about artificial intelligence\"):\n",
"    print(chunk.content, end=\"\", flush=True)"
]
},
{
"cell_type": "markdown",
"id": "8d6a31c2",
"metadata": {},
"source": [
"### Chat Messages\n",
"\n",
"You can use different message types to structure your conversations with the model:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "5d81af33",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<think>\n",
"Okay, the user asked how black holes are formed. Let me start by recalling the main processes. Stellar black holes form from massive stars. When a star with enough mass runs out of fuel, it can't support itself against gravity, leading to a supernova. If the core left after the supernova is more than about 3 times the Sun's mass, it collapses into a black hole.\n",
"\n",
"Then there are supermassive black holes, which are found at the centers of galaxies. Their formation is less understood. Maybe they start as smaller black holes and grow by merging with others or accreting matter over time. Also, there's the possibility of primordial black holes formed in the early universe, but that's more theoretical.\n",
"\n",
"I should mention the different types of black holes: stellar, supermassive, and maybe intermediate. Also, the event horizon and singularity concepts. Need to explain the process step by step, from the death of a star to the collapse. Make sure to clarify that not all stars become black holes—only those with sufficient mass. Maybe touch on the Chandrasekhar limit and Oppenheimer-Volkoff limit. Avoid too much jargon but still be precise. Check if the user might be a student or just curious, so keep it clear and structured.\n",
"</think>\n",
"\n",
"Black holes are formed through the collapse of massive stars or through other extreme astrophysical processes. Here's a breakdown of the main formation mechanisms:\n",
"\n",
"---\n",
"\n",
"### **1. Stellar Black Holes (Most Common)**\n",
"- **Origin**: Massive stars (typically **more than 20–25 times the mass of the Sun**).\n",
"- **Process**:\n",
"   1. **Stellar Evolution**: These stars burn through their nuclear fuel (hydrogen, helium, etc.) over millions of years.\n",
"   2. **Supernova Explosion**: When the star exhausts its fuel, it can no longer support itself against gravity. The core collapses, triggering a **supernova explosion** (a massive stellar explosion).\n",
"   3. **Core Collapse**: If the remaining core (after the supernova) is **more than about 3 times the mass of the Sun**, gravity overpowers all other forces. The core collapses into an **infinitely dense point** called a **singularity**, surrounded by an **event horizon** (the \"point of no return\" for light and matter).\n",
"\n",
"---\n",
"\n",
"### **2. Supermassive Black Holes (Found in Galaxy Centers)**\n",
"- **Mass**: Millions to billions of times the mass of the Sun.\n",
"- **Formation Theories**:\n",
"   - **Accretion**: They may form from the gradual accumulation of matter (gas, dust, stars) over billions of years.\n",
"   - **Mergers**: Smaller black holes (or dense star clusters) could merge to form supermassive ones.\n",
"   - **Direct Collapse**: Some theories suggest they could form from the direct collapse of massive gas clouds in the early universe, bypassing the stellar life cycle.\n",
"\n",
"---\n",
"\n",
"### **3. Intermediate-Mass Black Holes**\n",
"- **Mass**: Hundreds to thousands of solar masses.\n",
"- **Formation**: Less understood. They might form through the mergers of stellar black holes or from the collapse of unusually massive stars.\n",
"\n",
"---\n",
"\n",
"### **4. Primordial Black Holes (Hypothetical)**\n",
"- **Origin**: The early universe (within seconds after the Big Bang).\n",
"- **Formation**: If density fluctuations in the early universe were extreme enough, regions of space could have collapsed directly into black holes without going through a stellar life cycle.\n",
"- **Status**: These are still theoretical and have not been definitively observed.\n",
"\n",
"---\n",
"\n",
"### **Key Concepts**\n",
"- **Event Horizon**: The boundary around a black hole from which nothing (not even light) can escape.\n",
"- **Singularity**: The infinitely dense core of a black hole where the laws of physics as we know them break down.\n",
"- **Gravitational Collapse**: The process by which gravity compresses matter into an extremely small space, creating the extreme conditions of a black hole.\n",
"\n",
"---\n",
"\n",
"### **What Happens to the Star?**\n",
"- If the star is **not massive enough** (below ~20–25 solar masses), it may end as a **neutron star** or **white dwarf** instead of a black hole.\n",
"- Only the **core** of the star collapses into a black hole; the outer layers are expelled in the supernova explosion.\n",
"\n",
"Would you like to explore the effects of black holes on spacetime or their role in the universe?\n"
]
}
],
"source": [
"from langchain_core.messages import AIMessage, HumanMessage, SystemMessage\n",
"\n",
"messages = [\n",
"    SystemMessage(content=\"You are a helpful AI assistant with expertise in science.\"),\n",
"    HumanMessage(content=\"What are black holes?\"),\n",
"    AIMessage(\n",
"        content=\"Black holes are regions of spacetime where gravity is so strong that nothing, including light, can escape from them.\"\n",
"    ),\n",
"    HumanMessage(content=\"How are they formed?\"),\n",
"]\n",
"\n",
"response = chat.invoke(messages)\n",
"print(response.content)"
]
},
{
"cell_type": "markdown",
"id": "a4d21c6a",
"metadata": {},
"source": [
"### Parameters\n",
"\n",
"You can customize the chat model behavior using various parameters:"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "b4c83fb2",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"DNA, or deoxyribonucleic acid, is a molecule that contains the genetic instructions used in the development and function of all living organisms. It is often referred to as the \"building blocks of life\" because it carries the information necessary for the creation and growth of cells, tissues, and entire organisms. The DNA molecule is made up of two complementary strands of nucleotides that are twisted together in a double helix structure, with the sequence of these nucleotides determining the genetic code\n"
]
}
],
"source": [
"# Initialize with custom parameters\n",
"custom_chat = ChatNebius(\n",
"    model=\"meta-llama/Llama-3.3-70B-Instruct-fast\",\n",
"    max_tokens=100,  # Limit response length\n",
"    top_p=0.01,  # Lower nucleus sampling parameter for more deterministic responses\n",
"    request_timeout=30,  # Timeout in seconds\n",
"    stop=[\"###\", \"\\n\\n\"],  # Custom stop sequences\n",
")\n",
"\n",
"response = custom_chat.invoke(\"Explain what DNA is in exactly 3 sentences.\")\n",
"print(response.content)"
]
},
{
"cell_type": "markdown",
"id": "ea9f237c",
"metadata": {},
"source": [
"You can also pass parameters at invocation time:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "cd4e83c1",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Why do programmers prefer dark mode?\n",
"\n",
"Because light attracts bugs.\n"
]
}
],
"source": [
"# Standard model\n",
"standard_chat = ChatNebius(model=\"meta-llama/Llama-3.3-70B-Instruct-fast\")\n",
"\n",
"# Override parameters at invocation time\n",
"response = standard_chat.invoke(\n",
"    \"Tell me a joke about programming\",\n",
"    temperature=0.9,  # More creative for jokes\n",
"    max_tokens=50,  # Keep it short\n",
")\n",
"\n",
"print(response.content)"
]
},
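{
"cell_type": "markdown",
"id": "bind-params-note",
"metadata": {},
"source": [
"As a hedged alternative sketch (the cell id here is an arbitrary placeholder): because `ChatNebius` is a standard LangChain `Runnable`, you can also pre-bind invocation parameters with `.bind()` instead of passing them on every call:\n",
"\n",
"```python\n",
"# Pre-bind kwargs once; they are forwarded on each subsequent invocation\n",
"short_chat = standard_chat.bind(max_tokens=50)\n",
"print(short_chat.invoke(\"Tell me a joke about programming\").content)\n",
"```"
]
},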
{
"cell_type": "markdown",
"id": "3e8a40f1",
"metadata": {},
"source": [
"### Async Support\n",
"\n",
"ChatNebius supports async operations:"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "8fc36122",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Async response: <think>\n",
"Okay, the user is asking for the capital of France. Let me think. I know that France is a country in Europe, and its capital is Paris. But wait, I should make sure I'm not confusing it with another country. For example, Germany's capital is Berlin, and Spain's is Madrid. France's capital is definitely Paris. I remember that Paris is a major city known for landmarks like the Eiffel Tower and the Louvre Museum. Also, the French government is based there, with the Elysée Palace as the official residence of the President. I don't think there's any ambiguity here. The answer should be straightforward. Just need to confirm once more to avoid any mistakes.\n",
"</think>\n",
"\n",
"The capital of France is **Paris**. It is a major global city known for its cultural, artistic, and historical significance, as well as landmarks such as the Eiffel Tower, Louvre Museum, and Notre-Dame Cathedral.\n",
"\n",
"Async streaming:\n",
"<think>\n",
"Okay, the user is asking for the capital of Germany. Let me think. I know that Germany is a country in Europe, and I remember that Berlin is the capital. Wait, but I should make sure. Sometimes people confuse capitals with other major cities, like Munich or Frankfurt. But no, Berlin is definitely the capital. It's where the government is located, and it's a major city. Let me double-check. Yes, after reunification in 1990, Berlin became the capital again. Before that, Bonn was the capital, but that was during the division of Germany. So the answer should be Berlin. I should also mention that it's the largest city in Germany. That way, the user gets a complete answer.\n",
"</think>\n",
"\n",
"The capital of Germany is **Berlin**. It is also the largest city in the country and serves as the political, cultural, and economic center of Germany. Berlin became the capital in 1990 following the reunification of East and West Germany."
]
}
],
"source": [
"import asyncio\n",
"\n",
"\n",
"async def generate_async():\n",
"    response = await chat.ainvoke(\"What is the capital of France?\")\n",
"    print(\"Async response:\", response.content)\n",
"\n",
"    # Async streaming\n",
"    print(\"\\nAsync streaming:\")\n",
"    async for chunk in chat.astream(\"What is the capital of Germany?\"):\n",
"        print(chunk.content, end=\"\", flush=True)\n",
"\n",
"\n",
"await generate_async()"
]
},
{
"cell_type": "markdown",
"id": "a53a6bab",
"metadata": {},
"source": [
"### Available Models\n",
"\n",
"The full list of supported models can be found in the [Nebius AI Studio Documentation](https://studio.nebius.com/)."
]
},
{
"cell_type": "markdown",
"id": "4aa82e17",
"metadata": {},
"source": [
"## Chaining\n",
"\n",
"You can use `ChatNebius` in LangChain chains and agents:"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "7e78e429",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<think>\n",
"Okay, the user asked me to explain how the internet works, but I need to do it in the style of Shakespeare. Let me start by recalling how the internet functions. It's a network of interconnected devices communicating via protocols like TCP/IP. Data is broken into packets, sent through routers, and reassembled at the destination.\n",
"\n",
"Now, translating that into Shakespearean language. I should use archaic terms and a poetic structure. Words like \"thou,\" \"doth,\" \"hark,\" and \"verily\" come to mind. Maybe start with a metaphor, like comparing the internet to a vast tapestry or a web. Mention nodes as \"nodes\" or \"stations,\" data packets as \"messengers\" or \"letters.\" Routers could be \"wayfarers\" or \"guides.\" The process of breaking data into packets might be likened to dividing a letter into parts for delivery. Emphasize the global aspect with \"across the globe\" or \"far and wide.\" Conclude with a flourish, perhaps a metaphor about connection and knowledge.\n",
"\n",
"I need to ensure the explanation is accurate but wrapped in the poetic and dramatic style of Shakespeare. Avoid modern jargon, use iambic pentameter if possible, and keep the flow natural. Let me piece it together step by step, checking that each part of the internet's function is covered metaphorically.\n",
"</think>\n",
"\n",
"Hark! List thy ear, good friend, to this most wondrous tale, \n",
"Of threads unseen that bind the world in one grand tale. \n",
"The Internet, a net most vast, doth span the globe, \n",
"A labyrinth of light, where thoughts and data rove. \n",
"\n",
"Behold! Each device, a node, doth hum and sing, \n",
"Linked by wires and waves, where signals doth spring. \n",
"They speak in tongues of ones and naughts, so pure, \n",
"A code most ancient, yet evermore secure. \n",
"\n",
"When thou dost send a thought, or word, or song, \n",
"It breaks to parcels small, like letters on a long. \n",
"Each parcel, a messenger, doth seek its way, \n",
"Through routers wise, who guide them 'cross the day. \n",
"\n",
"These wayfarers, with logic keen and bright, \n",
"Choose paths most swift, through highways of light. \n",
"They leap from tower to tower, far and wide, \n",
"Till each parcel finds its mark, and joins the guide. \n",
"\n",
"Then, like a scroll unrolled, the message grows, \n",
"A tapestry of bits, in order it flows. \n",
"Thus, thou dost speak to friend, or seek a tome, \n",
"And lo! The world doth answer, quick as home. \n",
"\n",
"So mark this truth: though vast, it's but a thread, \n",
"A web of minds, where knowledge is widespread. \n",
"The Internet, a stage where all may play, \n",
"And none shall be alone, though far away.\n"
]
}
],
"source": [
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"# Create a prompt template\n",
"prompt = ChatPromptTemplate.from_messages(\n",
"    [\n",
"        (\n",
"            \"system\",\n",
"            \"You are a helpful assistant that answers in the style of {character}.\",\n",
"        ),\n",
"        (\"human\", \"{query}\"),\n",
"    ]\n",
")\n",
"\n",
"# Create a chain\n",
"chain = prompt | chat | StrOutputParser()\n",
"\n",
"# Invoke the chain\n",
"response = chain.invoke(\n",
"    {\"character\": \"Shakespeare\", \"query\": \"Explain how the internet works\"}\n",
")\n",
"\n",
"print(response)"
]
},
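{
"cell_type": "markdown",
"id": "chain-batch-note",
"metadata": {},
"source": [
"A small follow-up sketch (placeholder cell id; the second input dict is purely illustrative): since the chain is itself a `Runnable`, batching and async invocation come for free via the standard interface:\n",
"\n",
"```python\n",
"# Run several inputs through the same chain in one call\n",
"responses = chain.batch(\n",
"    [\n",
"        {\"character\": \"Shakespeare\", \"query\": \"Explain how the internet works\"},\n",
"        {\"character\": \"a pirate\", \"query\": \"Explain how DNS works\"},\n",
"    ]\n",
")\n",
"```"
]
},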
{
"cell_type": "markdown",
"id": "f7a35f40",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For more details about the Nebius AI Studio API, visit the [Nebius AI Studio Documentation](https://studio.nebius.com/api-reference)."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "354ffc01",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.8"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -17,7 +17,7 @@
|
||||
"source": [
|
||||
"# ChatNetmind\n",
|
||||
"\n",
|
||||
"This will help you getting started with Netmind [chat models](https://www.netmind.ai/). For detailed documentation of all ChatNetmind features and configurations head to the [API reference](https://github.com/protagolabs/langchain-netmind).\n",
|
||||
"This will help you get started with Netmind [chat models](https://www.netmind.ai/). For detailed documentation of all ChatNetmind features and configurations head to the [API reference](https://github.com/protagolabs/langchain-netmind).\n",
|
||||
"\n",
|
||||
"- See https://www.netmind.ai/ for an example.\n",
|
||||
"\n",
|
||||
|
||||
@@ -17,7 +17,7 @@
|
||||
"source": [
|
||||
"# ChatNVIDIA\n",
|
||||
"\n",
|
||||
"This will help you getting started with NVIDIA [chat models](/docs/concepts/chat_models). For detailed documentation of all `ChatNVIDIA` features and configurations head to the [API reference](https://python.langchain.com/api_reference/nvidia_ai_endpoints/chat_models/langchain_nvidia_ai_endpoints.chat_models.ChatNVIDIA.html).\n",
|
||||
"This will help you get started with NVIDIA [chat models](/docs/concepts/chat_models). For detailed documentation of all `ChatNVIDIA` features and configurations head to the [API reference](https://python.langchain.com/api_reference/nvidia_ai_endpoints/chat_models/langchain_nvidia_ai_endpoints.chat_models.ChatNVIDIA.html).\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"The `langchain-nvidia-ai-endpoints` package contains LangChain integrations building applications with models on\n",
|
||||
@@ -318,7 +318,7 @@
|
||||
"source": [
|
||||
"### Code Generation\n",
|
||||
"\n",
|
||||
"These models accept the same arguments and input structure as regular chat models, but they tend to perform better on code-genreation and structured code tasks. An example of this is `meta/codellama-70b`."
|
||||
"These models accept the same arguments and input structure as regular chat models, but they tend to perform better on code-generation and structured code tasks. An example of this is `meta/codellama-70b`."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -19,7 +19,7 @@
|
||||
"source": [
|
||||
"# ChatOCIModelDeployment\n",
|
||||
"\n",
|
||||
"This will help you getting started with OCIModelDeployment [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatOCIModelDeployment features and configurations head to the [API reference](https://python.langchain.com/api_reference/community/chat_models/langchain_community.chat_models.oci_data_science.ChatOCIModelDeployment.html).\n",
|
||||
"This will help you get started with OCIModelDeployment [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatOCIModelDeployment features and configurations head to the [API reference](https://python.langchain.com/api_reference/community/chat_models/langchain_community.chat_models.oci_data_science.ChatOCIModelDeployment.html).\n",
|
||||
"\n",
|
||||
"[OCI Data Science](https://docs.oracle.com/en-us/iaas/data-science/using/home.htm) is a fully managed and serverless platform for data science teams to build, train, and manage machine learning models in the Oracle Cloud Infrastructure. You can use [AI Quick Actions](https://blogs.oracle.com/ai-and-datascience/post/ai-quick-actions-in-oci-data-science) to easily deploy LLMs on [OCI Data Science Model Deployment Service](https://docs.oracle.com/en-us/iaas/data-science/using/model-dep-about.htm). You may choose to deploy the model with popular inference frameworks such as vLLM or TGI. By default, the model deployment endpoint mimics the OpenAI API protocol.\n",
|
||||
"\n",
|
||||
|
||||
@@ -16,7 +16,7 @@
|
||||
"\n",
|
||||
"1. Get an API Token from [your OctoAI account page](https://octoai.cloud/settings).\n",
|
||||
" \n",
|
||||
"2. Paste your API token in in the code cell below or use the `octoai_api_token` keyword argument.\n",
|
||||
"2. Paste your API token in the code cell below or use the `octoai_api_token` keyword argument.\n",
|
||||
"\n",
|
||||
"Note: If you want to use a different model than the [available models](https://octoai.cloud/text?selectedTags=Chat), you can containerize the model and make a custom OctoAI endpoint yourself, by following [Build a Container from Python](https://octo.ai/docs/bring-your-own-model/advanced-build-a-container-from-scratch-in-python) and [Create a Custom Endpoint from a Container](https://octo.ai/docs/bring-your-own-model/create-custom-endpoints-from-a-container/create-custom-endpoints-from-a-container) and then updating your `OCTOAI_API_BASE` environment variable.\n"
|
||||
]
|
||||
@@ -99,7 +99,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.12"
|
||||
"version": "3.12.10"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -17,7 +17,7 @@
|
||||
"source": [
|
||||
"# ChatOutlines\n",
|
||||
"\n",
|
||||
"This will help you getting started with Outlines [chat models](/docs/concepts/chat_models/). For detailed documentation of all ChatOutlines features and configurations head to the [API reference](https://python.langchain.com/api_reference/community/chat_models/langchain_community.chat_models.outlines.ChatOutlines.html).\n",
|
||||
"This will help you get started with Outlines [chat models](/docs/concepts/chat_models/). For detailed documentation of all ChatOutlines features and configurations head to the [API reference](https://python.langchain.com/api_reference/community/chat_models/langchain_community.chat_models.outlines.ChatOutlines.html).\n",
|
||||
"\n",
|
||||
"[Outlines](https://github.com/outlines-dev/outlines) is a library for constrained language generation. It allows you to use large language models (LLMs) with various backends while applying constraints to the generated output.\n",
|
||||
"\n",
|
||||
|
||||
@@ -17,7 +17,7 @@
|
||||
"source": [
|
||||
"# ChatPipeshift\n",
|
||||
"\n",
|
||||
"This will help you getting started with Pipeshift [chat models](/docs/concepts/chat_models/). For detailed documentation of all ChatPipeshift features and configurations head to the [API reference](https://dashboard.pipeshift.com/docs).\n",
|
||||
"This will help you get started with Pipeshift [chat models](/docs/concepts/chat_models/). For detailed documentation of all ChatPipeshift features and configurations head to the [API reference](https://dashboard.pipeshift.com/docs).\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
|
||||
@@ -4,93 +4,99 @@
|
||||
"cell_type": "markdown",
|
||||
"id": "3f0a201c",
|
||||
"metadata": {},
|
||||
"source": "# ChatPredictionGuard"
|
||||
"source": [
|
||||
"# ChatPredictionGuard"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "markdown",
|
||||
"source": ">[Prediction Guard](https://predictionguard.com) is a secure, scalable GenAI platform that safeguards sensitive data, prevents common AI malfunctions, and runs on affordable hardware.\n",
|
||||
"id": "c3adc2aac37985ac"
|
||||
"id": "c3adc2aac37985ac",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
">[Prediction Guard](https://predictionguard.com) is a secure, scalable GenAI platform that safeguards sensitive data, prevents common AI malfunctions, and runs on affordable hardware.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "markdown",
|
||||
"source": "## Overview",
|
||||
"id": "4e1ec341481fb244"
|
||||
"id": "4e1ec341481fb244",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Overview"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "b4090b7489e37a91",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Integration details\n",
|
||||
"This integration utilizes the Prediction Guard API, which includes various safeguards and security features."
|
||||
],
|
||||
"id": "b4090b7489e37a91"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "e26e5b3240452162",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Model features\n",
|
||||
"The models supported by this integration only feature text-generation currently, along with the input and output checks described here."
|
||||
],
|
||||
"id": "e26e5b3240452162"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "4fca548b61efb049",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"To access Prediction Guard models, contact us [here](https://predictionguard.com/get-started) to get a Prediction Guard API key and get started. "
|
||||
],
|
||||
"id": "4fca548b61efb049"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "7cc34a9cd865690c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Credentials\n",
|
||||
"Once you have a key, you can set it with "
|
||||
],
|
||||
"id": "7cc34a9cd865690c"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "fa57fba89276da13",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2025-04-21T18:23:30.746350Z",
|
||||
"start_time": "2025-04-21T18:23:30.744744Z"
|
||||
}
|
||||
},
|
||||
"cell_type": "code",
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if \"PREDICTIONGUARD_API_KEY\" not in os.environ:\n",
|
||||
" os.environ[\"PREDICTIONGUARD_API_KEY\"] = \"<Your Prediction Guard API Key>\""
|
||||
],
|
||||
"id": "fa57fba89276da13",
|
||||
"outputs": [],
|
||||
"execution_count": 2
|
||||
]
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "87dc1742af7b053",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"Install the Prediction Guard Langchain integration with"
|
||||
],
|
||||
"id": "87dc1742af7b053"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "b816ae8553cba021",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2025-04-21T18:23:33.359278Z",
|
||||
"start_time": "2025-04-21T18:23:32.853207Z"
|
||||
}
|
||||
},
|
||||
"cell_type": "code",
|
||||
"source": "%pip install -qU langchain-predictionguard",
|
||||
"id": "b816ae8553cba021",
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
@@ -100,7 +106,9 @@
|
||||
]
|
||||
}
|
||||
],
|
||||
"execution_count": 3
|
||||
"source": [
|
||||
"%pip install -qU langchain-predictionguard"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
@@ -108,63 +116,61 @@
|
||||
"metadata": {
|
||||
"id": "mesCTyhnJkNS"
|
||||
},
|
||||
"source": "## Instantiation"
|
||||
"source": [
|
||||
"## Instantiation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "7191a5ce",
|
||||
"metadata": {
|
||||
"id": "2xe8JEUwA7_y",
|
||||
"ExecuteTime": {
|
||||
"end_time": "2025-04-21T18:23:39.812675Z",
|
||||
"start_time": "2025-04-21T18:23:39.666881Z"
|
||||
}
|
||||
},
|
||||
"id": "2xe8JEUwA7_y"
|
||||
},
|
||||
"source": "from langchain_predictionguard import ChatPredictionGuard",
|
||||
"outputs": [],
|
||||
"execution_count": 4
|
||||
"source": [
|
||||
"from langchain_predictionguard import ChatPredictionGuard"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "140717c9",
|
||||
"metadata": {
|
||||
"id": "Ua7Mw1N4HcER",
|
||||
"ExecuteTime": {
|
||||
"end_time": "2025-04-21T18:23:41.590296Z",
|
||||
"start_time": "2025-04-21T18:23:41.253237Z"
|
||||
}
|
||||
},
|
||||
"id": "Ua7Mw1N4HcER"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# If predictionguard_api_key is not passed, default behavior is to use the `PREDICTIONGUARD_API_KEY` environment variable.\n",
|
||||
"chat = ChatPredictionGuard(model=\"Hermes-3-Llama-3.1-8B\")"
|
||||
],
|
||||
"outputs": [],
|
||||
"execution_count": 5
|
||||
]
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "markdown",
|
||||
"source": "## Invocation",
|
||||
"id": "8dbdfc55b638e4c2"
|
||||
"id": "8dbdfc55b638e4c2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "5a1635e7ae7134a3",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-11-08T19:44:56.634939Z",
|
||||
"start_time": "2024-11-08T19:44:55.924534Z"
|
||||
}
|
||||
},
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" (\"system\", \"You are a helpful assistant that tells jokes.\"),\n",
|
||||
" (\"human\", \"Tell me a joke\"),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"ai_msg = chat.invoke(messages)\n",
|
||||
"ai_msg"
|
||||
],
|
||||
"id": "5a1635e7ae7134a3",
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
@@ -177,18 +183,26 @@
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"execution_count": 4
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" (\"system\", \"You are a helpful assistant that tells jokes.\"),\n",
|
||||
" (\"human\", \"Tell me a joke\"),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"ai_msg = chat.invoke(messages)\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "a6f8025726e5da3c",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-11-08T19:44:57.501782Z",
|
||||
"start_time": "2024-11-08T19:44:57.498931Z"
|
||||
}
|
||||
},
|
||||
"cell_type": "code",
|
||||
"source": "print(ai_msg.content)",
|
||||
"id": "a6f8025726e5da3c",
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
@@ -198,16 +212,21 @@
|
||||
]
|
||||
}
|
||||
],
|
||||
"execution_count": 5
|
||||
"source": [
|
||||
"print(ai_msg.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e9e96106-8e44-4373-9c57-adc3d0062df3",
|
||||
"metadata": {},
|
||||
"source": "## Streaming"
|
||||
"source": [
|
||||
"## Streaming"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "ea62d2da-802c-4b8a-a63e-5d1d0a72540f",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
@@ -215,12 +234,6 @@
|
||||
"start_time": "2024-11-08T19:44:59.095584Z"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"chat = ChatPredictionGuard(model=\"Hermes-2-Pro-Llama-3-8B\")\n",
|
||||
"\n",
|
||||
"for chunk in chat.stream(\"Tell me a joke\"):\n",
|
||||
" print(chunk.content, end=\"\", flush=True)"
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
@@ -232,33 +245,39 @@
|
||||
]
|
||||
}
|
||||
],
|
||||
"execution_count": 6
|
||||
"source": [
|
||||
"chat = ChatPredictionGuard(model=\"Hermes-2-Pro-Llama-3-8B\")\n",
|
||||
"\n",
|
||||
"for chunk in chat.stream(\"Tell me a joke\"):\n",
|
||||
" print(chunk.content, end=\"\", flush=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "1227780d6e6728ba",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Tool Calling\n",
|
||||
"\n",
|
||||
"Prediction Guard has a tool calling API that lets you describe tools and their arguments, which enables the model return a JSON object with a tool to call and the inputs to that tool. Tool-calling is very useful for building tool-using chains and agents, and for getting structured outputs from models more generally.\n"
|
||||
],
|
||||
"id": "1227780d6e6728ba"
|
||||
"Prediction Guard has a tool calling API that lets you describe tools and their arguments, which enables the model to return a JSON object with a tool to call and the inputs to that tool. Tool-calling is very useful for building tool-using chains and agents, and for getting structured outputs from models more generally.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "23446aa52e01d1ba",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### ChatPredictionGuard.bind_tools()\n",
|
||||
"\n",
|
||||
"Using `ChatPredictionGuard.bind_tools()`, you can pass in Pydantic classes, dict schemas, and Langchain tools as tools to the model, which are then reformatted to allow for use by the model."
|
||||
],
|
||||
"id": "23446aa52e01d1ba"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "code",
|
||||
"outputs": [],
|
||||
"execution_count": null,
|
||||
"id": "135efb0bfc5916c1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from pydantic import BaseModel, Field\n",
|
||||
"\n",
|
||||
@@ -279,24 +298,18 @@
|
||||
" [GetWeather, GetPopulation]\n",
|
||||
" # strict = True # enforce tool args schema is respected\n",
|
||||
")"
|
||||
],
|
||||
"id": "135efb0bfc5916c1"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "8136f19a8836cd58",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2025-04-21T18:42:41.834079Z",
|
||||
"start_time": "2025-04-21T18:42:40.289095Z"
|
||||
}
|
||||
},
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"ai_msg = llm_with_tools.invoke(\n",
|
||||
" \"Which city is hotter today and which is bigger: LA or NY?\"\n",
|
||||
")\n",
|
||||
"ai_msg"
|
||||
],
|
||||
"id": "8136f19a8836cd58",
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
@@ -309,28 +322,33 @@
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"execution_count": 7
|
||||
"source": [
|
||||
"ai_msg = llm_with_tools.invoke(\n",
|
||||
" \"Which city is hotter today and which is bigger: LA or NY?\"\n",
|
||||
")\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "84f405c45a35abe5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### AIMessage.tool_calls\n",
|
||||
"\n",
|
||||
"Notice that the AIMessage has a tool_calls attribute. This contains in a standardized ToolCall format that is model-provider agnostic."
|
||||
],
|
||||
"id": "84f405c45a35abe5"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "bdcee85475019719",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2025-04-21T18:43:00.429453Z",
|
||||
"start_time": "2025-04-21T18:43:00.426399Z"
|
||||
}
|
||||
},
|
||||
"cell_type": "code",
|
||||
"source": "ai_msg.tool_calls",
|
||||
"id": "bdcee85475019719",
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
@@ -358,7 +376,9 @@
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"execution_count": 8
|
||||
"source": [
|
||||
"ai_msg.tool_calls"
|
||||
]
|
||||
},
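Because the `ToolCall` format is standardized, the returned calls can be dispatched to plain Python handlers generically. A rough sketch, reusing the `GetWeather` and `GetPopulation` schemas bound above; the handlers are hypothetical stand-ins for real APIs:

```python
# Map each tool name to a stub handler; real code would call actual services.
def get_weather(location: str) -> str:
    return f"Sunny in {location}"  # hypothetical placeholder result


def get_population(location: str) -> str:
    return f"Population of {location}: 1,000,000"  # hypothetical placeholder


handlers = {"GetWeather": get_weather, "GetPopulation": get_population}

for call in ai_msg.tool_calls:
    # Each standardized ToolCall is a dict with "name", "args", and "id" keys.
    result = handlers[call["name"]](**call["args"])
    print(call["name"], "->", result)
```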
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
@@ -386,6 +406,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "9c5d7a87",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
@@ -393,16 +414,6 @@
|
||||
"start_time": "2024-11-08T19:45:01.633319Z"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"chat = ChatPredictionGuard(\n",
|
||||
" model=\"Hermes-2-Pro-Llama-3-8B\", predictionguard_input={\"pii\": \"block\"}\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"try:\n",
|
||||
" chat.invoke(\"Hello, my name is John Doe and my SSN is 111-22-3333\")\n",
|
||||
"except ValueError as e:\n",
|
||||
" print(e)"
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
@@ -412,7 +423,16 @@
|
||||
]
|
||||
}
|
||||
],
|
||||
"execution_count": 7
|
||||
"source": [
|
||||
"chat = ChatPredictionGuard(\n",
|
||||
" model=\"Hermes-2-Pro-Llama-3-8B\", predictionguard_input={\"pii\": \"block\"}\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"try:\n",
|
||||
" chat.invoke(\"Hello, my name is John Doe and my SSN is 111-22-3333\")\n",
|
||||
"except ValueError as e:\n",
|
||||
" print(e)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
@@ -424,6 +444,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "a9f96fb4-00c3-4a39-b177-d1ccd5caecab",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
@@ -431,6 +452,15 @@
|
||||
"start_time": "2024-11-08T19:45:03.275661Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Could not make prediction. prompt injection detected\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat = ChatPredictionGuard(\n",
|
||||
" model=\"Hermes-2-Pro-Llama-3-8B\",\n",
|
||||
@@ -443,17 +473,7 @@
|
||||
" )\n",
|
||||
"except ValueError as e:\n",
|
||||
" print(e)"
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Could not make prediction. prompt injection detected\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"execution_count": 8
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
@@ -483,23 +503,15 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "0cb3b91f",
|
||||
"metadata": {
|
||||
"id": "PzxSbYwqTm2w",
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-11-08T19:45:10.044203Z",
|
||||
"start_time": "2024-11-08T19:45:05.692378Z"
|
||||
}
|
||||
},
|
||||
"id": "PzxSbYwqTm2w"
|
||||
},
|
||||
"source": [
|
||||
"chat = ChatPredictionGuard(\n",
|
||||
" model=\"Hermes-2-Pro-Llama-3-8B\", predictionguard_output={\"toxicity\": True}\n",
|
||||
")\n",
|
||||
"try:\n",
|
||||
" chat.invoke(\"Please tell me something that would fail a toxicity check!\")\n",
|
||||
"except ValueError as e:\n",
|
||||
" print(e)"
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
@@ -509,7 +521,15 @@
|
||||
]
|
||||
}
|
||||
],
|
||||
"execution_count": 9
|
||||
"source": [
|
||||
"chat = ChatPredictionGuard(\n",
|
||||
" model=\"Hermes-2-Pro-Llama-3-8B\", predictionguard_output={\"toxicity\": True}\n",
|
||||
")\n",
|
||||
"try:\n",
|
||||
" chat.invoke(\"Please tell me something that would fail a toxicity check!\")\n",
|
||||
"except ValueError as e:\n",
|
||||
" print(e)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
@@ -521,6 +541,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "249da02a-d32d-4f91-82d0-10ec0505aec7",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
@@ -528,16 +549,6 @@
|
||||
"start_time": "2024-11-08T19:45:10.109509Z"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"chat = ChatPredictionGuard(\n",
|
||||
" model=\"Hermes-2-Pro-Llama-3-8B\", predictionguard_output={\"factuality\": True}\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"try:\n",
|
||||
" chat.invoke(\"Make up something that would fail a factuality check!\")\n",
|
||||
"except ValueError as e:\n",
|
||||
" print(e)"
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
@@ -547,22 +558,47 @@
|
||||
]
|
||||
}
|
||||
],
|
||||
"execution_count": 10
|
||||
"source": [
|
||||
"chat = ChatPredictionGuard(\n",
|
||||
" model=\"Hermes-2-Pro-Llama-3-8B\", predictionguard_output={\"factuality\": True}\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"try:\n",
|
||||
" chat.invoke(\"Make up something that would fail a factuality check!\")\n",
|
||||
"except ValueError as e:\n",
|
||||
" print(e)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "markdown",
|
||||
"source": "## Chaining",
|
||||
"id": "3c81e5a85a765ece"
|
||||
"id": "3c81e5a85a765ece",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "beb4e0666bb514a7",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-11-08T19:45:17.525848Z",
|
||||
"start_time": "2024-11-08T19:45:15.197628Z"
|
||||
}
|
||||
},
|
||||
"cell_type": "code",
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Step 1: Determine the year Justin Bieber was born.\\nJustin Bieber was born on March 1, 1994.\\n\\nStep 2: Determine which NFL team won the Super Bowl in 1994.\\nThe 1994 Super Bowl was Super Bowl XXVIII, which took place on January 30, 1994. The winning team was the Dallas Cowboys, who defeated the Buffalo Bills with a score of 30-13.\\n\\nSo, the NFL team that won the Super Bowl in the year Justin Bieber was born is the Dallas Cowboys.', additional_kwargs={}, response_metadata={}, id='run-bbc94f8b-9ab0-4839-8580-a9e510bfc97a-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.prompts import PromptTemplate\n",
|
||||
"\n",
|
||||
@@ -577,30 +613,24 @@
|
||||
"question = \"What NFL team won the Super Bowl in the year Justin Beiber was born?\"\n",
|
||||
"\n",
|
||||
"chat_chain.invoke({\"question\": question})"
|
||||
],
|
||||
"id": "beb4e0666bb514a7",
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Step 1: Determine the year Justin Bieber was born.\\nJustin Bieber was born on March 1, 1994.\\n\\nStep 2: Determine which NFL team won the Super Bowl in 1994.\\nThe 1994 Super Bowl was Super Bowl XXVIII, which took place on January 30, 1994. The winning team was the Dallas Cowboys, who defeated the Buffalo Bills with a score of 30-13.\\n\\nSo, the NFL team that won the Super Bowl in the year Justin Bieber was born is the Dallas Cowboys.', additional_kwargs={}, response_metadata={}, id='run-bbc94f8b-9ab0-4839-8580-a9e510bfc97a-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"execution_count": 11
|
||||
]
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "d87695d5ff1471c1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"For detailed documentation of all ChatPredictionGuard features and configurations check out the API reference: https://python.langchain.com/api_reference/community/chat_models/langchain_community.chat_models.predictionguard.ChatPredictionGuard.html"
|
||||
],
|
||||
"id": "d87695d5ff1471c1"
|
||||
"For detailed documentation of all ChatPredictionGuard features and configurations, check out the API reference: https://python.langchain.com/api_reference/community/chat_models/langchain_community.chat_models.predictionguard.ChatPredictionGuard.html"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3664cc0e-841c-46f1-a158-4d5f5185bc94",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -622,7 +652,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.16"
|
||||
"version": "3.12.10"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -21,7 +21,7 @@
|
||||
"source": [
|
||||
"# ChatQwQ\n",
|
||||
"\n",
|
||||
"This will help you getting started with QwQ [chat models](../../concepts/chat_models.mdx). For detailed documentation of all ChatQwQ features and configurations head to the [API reference](https://pypi.org/project/langchain-qwq/).\n",
|
||||
"This will help you get started with QwQ [chat models](../../concepts/chat_models.mdx). For detailed documentation of all ChatQwQ features and configurations head to the [API reference](https://pypi.org/project/langchain-qwq/).\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
|
||||
@@ -36,7 +36,7 @@
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access Reka models you'll need to create an Reka developer account, get an API key, and install the `langchain_community` integration package and the reka python package via 'pip install reka-api'.\n",
|
||||
"To access Reka models you'll need to create a Reka developer account, get an API key, and install the `langchain_community` integration package and the reka python package via 'pip install reka-api'.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
@@ -280,7 +280,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Use use with tavtly api search"
|
||||
"Use with Tavily api search"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -19,7 +19,7 @@
|
||||
"source": [
|
||||
"# ChatSambaNovaCloud\n",
|
||||
"\n",
|
||||
"This will help you getting started with SambaNovaCloud [chat models](/docs/concepts/chat_models/). For detailed documentation of all ChatSambaNovaCloud features and configurations head to the [API reference](https://docs.sambanova.ai/cloud/docs/get-started/overview).\n",
|
||||
"This will help you get started with SambaNovaCloud [chat models](/docs/concepts/chat_models/). For detailed documentation of all ChatSambaNovaCloud features and configurations head to the [API reference](https://docs.sambanova.ai/cloud/docs/get-started/overview).\n",
|
||||
"\n",
|
||||
"**[SambaNova](https://sambanova.ai/)'s** [SambaNova Cloud](https://cloud.sambanova.ai/) is a platform for performing inference with open-source models\n",
|
||||
"\n",
|
||||
|
||||
@@ -19,7 +19,7 @@
|
||||
"source": [
|
||||
"# ChatSambaStudio\n",
|
||||
"\n",
|
||||
"This will help you getting started with SambaStudio [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatStudio features and configurations head to the [API reference](https://docs.sambanova.ai/sambastudio/latest/index.html).\n",
|
||||
"This will help you get started with SambaStudio [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatStudio features and configurations head to the [API reference](https://docs.sambanova.ai/sambastudio/latest/index.html).\n",
|
||||
"\n",
|
||||
"**[SambaNova](https://sambanova.ai/)'s** [SambaStudio](https://docs.sambanova.ai/sambastudio/latest/sambastudio-intro.html) SambaStudio is a rich, GUI-based platform that provides the functionality to train, deploy, and manage models in SambaNova [DataScale](https://sambanova.ai/products/datascale) systems.\n",
|
||||
"\n",
|
||||
|
||||
@@ -1,5 +1,13 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "516cad96-cfcb-4dd1-b70e-ecaef33e60ba",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Deprecated since version 0.0.34: Use langchain_upstage.ChatUpstage instead."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
@@ -72,7 +80,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.0"
|
||||
"version": "3.12.10"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -21,7 +21,6 @@
|
||||
"source": [
|
||||
"# Nebula (Symbl.ai)\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"This notebook covers how to get started with [Nebula](https://docs.symbl.ai/docs/nebula-llm) - Symbl.ai's chat model.\n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
|
||||
@@ -22,7 +22,7 @@
|
||||
"> script creation, resume generation, article writing, code generation, data analysis, and content\n",
|
||||
"> analysis.\n",
|
||||
"\n",
|
||||
"See for [more information](https://cloud.tencent.com/document/product/1729)."
|
||||
"See [more information](https://cloud.tencent.com/document/product/1729) for more details."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -98,7 +98,7 @@
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"## For ChatHunyuan with Streaming"
|
||||
"## Using ChatHunyuan with Streaming"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -18,7 +18,7 @@
|
||||
"# ChatTogether\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"This page will help you get started with Together AI [chat models](../../concepts/chat_models.mdx). For detailed documentation of all ChatTogether features and configurations head to the [API reference](https://python.langchain.com/api_reference/together/chat_models/langchain_together.chat_models.ChatTogether.html).\n",
|
||||
"This page will help you get started with Together AI [chat models](../../concepts/chat_models.mdx). For detailed documentation of all ChatTogether features and configurations, head to the [API reference](https://python.langchain.com/api_reference/together/chat_models/langchain_together.chat_models.ChatTogether.html).\n",
|
||||
"\n",
|
||||
"[Together AI](https://www.together.ai/) offers an API to query [50+ leading open-source models](https://docs.together.ai/docs/chat-models)\n",
|
||||
"\n",
|
||||
@@ -40,7 +40,7 @@
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"Head to [this page](https://api.together.ai) to sign up to Together and generate an API key. Once you've done this set the TOGETHER_API_KEY environment variable:"
|
||||
"Head to [this page](https://api.together.ai) to sign up to Together and generate an API key. Once you've done this, set the TOGETHER_API_KEY environment variable:"
|
||||
]
|
||||
},
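A minimal sketch of doing so interactively, following the same pattern the other integration pages use:

```python
import getpass
import os

# Prompt for the key only if it isn't already set in the environment.
if "TOGETHER_API_KEY" not in os.environ:
    os.environ["TOGETHER_API_KEY"] = getpass.getpass("Enter your Together API key: ")
```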
|
||||
{
|
||||
@@ -81,7 +81,7 @@
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain Together integration lives in the `langchain-together` package:"
|
||||
"The LangChain Together integration is included in the `langchain-together` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -187,7 +187,7 @@
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can [chain](../../how_to/sequence.ipynb) our model with a prompt template like so:"
|
||||
"We can [chain](../../how_to/sequence.ipynb) our model with a prompt template as follows:"
|
||||
]
|
||||
},
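Since the full cell is elided in this diff, here is a rough sketch of what such a chain typically looks like; the model name is illustrative and should be replaced with one Together actually serves:

```python
from langchain_core.prompts import ChatPromptTemplate
from langchain_together import ChatTogether

llm = ChatTogether(model="meta-llama/Llama-3.3-70B-Instruct-Turbo")  # illustrative

prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "You are a helpful assistant that translates {input_language} to {output_language}.",
        ),
        ("human", "{input}"),
    ]
)

# Compose the prompt and the model into a single runnable.
chain = prompt | llm
response = chain.invoke(
    {"input_language": "English", "output_language": "German", "input": "I love programming."}
)
print(response.content)
```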
|
||||
{
|
||||
@@ -237,7 +237,7 @@
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all ChatTogether features and configurations head to the API reference: https://python.langchain.com/api_reference/together/chat_models/langchain_together.chat_models.ChatTogether.html"
|
||||
"For detailed documentation of all ChatTogether features and configurations, head to the API reference: https://python.langchain.com/api_reference/together/chat_models/langchain_together.chat_models.ChatTogether.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
@@ -5,16 +5,14 @@
|
||||
"id": "134a0785",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"[Vectara](https://vectara.com/) is the trusted AI Assistant and Agent platform which focuses on enterprise readiness for mission-critical applications.\n",
|
||||
"[Vectara](https://vectara.com/) is the trusted AI Assistant and Agent platform, which focuses on enterprise readiness for mission-critical applications.\n",
|
||||
"Vectara serverless RAG-as-a-service provides all the components of RAG behind an easy-to-use API, including:\n",
|
||||
"1. A way to extract text from files (PDF, PPT, DOCX, etc)\n",
|
||||
"2. ML-based chunking that provides state of the art performance.\n",
|
||||
"2. ML-based chunking that provides state-of-the-art performance.\n",
|
||||
"3. The [Boomerang](https://vectara.com/how-boomerang-takes-retrieval-augmented-generation-to-the-next-level-via-grounded-generation/) embeddings model.\n",
|
||||
"4. Its own internal vector database where text chunks and embedding vectors are stored.\n",
|
||||
"5. A query service that automatically encodes the query into embedding, and retrieves the most relevant text segments, including support for [Hybrid Search](https://docs.vectara.com/docs/api-reference/search-apis/lexical-matching) as well as multiple reranking options such as the [multi-lingual relevance reranker](https://www.vectara.com/blog/deep-dive-into-vectara-multilingual-reranker-v1-state-of-the-art-reranker-across-100-languages), [MMR](https://vectara.com/get-diverse-results-and-comprehensive-summaries-with-vectaras-mmr-reranker/), [UDF reranker](https://www.vectara.com/blog/rag-with-user-defined-functions-based-reranking). \n",
|
||||
"6. An LLM to for creating a [generative summary](https://docs.vectara.com/docs/learn/grounded-generation/grounded-generation-overview), based on the retrieved documents (context), including citations.\n",
|
||||
"6. An LLM for creating a [generative summary](https://docs.vectara.com/docs/learn/grounded-generation/grounded-generation-overview), based on the retrieved documents (context), including citations.\n",
|
||||
"\n",
|
||||
"For more information:\n",
|
||||
"- [Documentation](https://docs.vectara.com/docs/)\n",
|
||||
@@ -26,7 +24,7 @@
|
||||
"\n",
|
||||
"### Setup\n",
|
||||
"\n",
|
||||
"To use the `VectaraVectorStore` you first need to install the partner package.\n"
|
||||
"To use the `VectaraVectorStore`, you first need to install the partner package.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -48,8 +46,8 @@
|
||||
"\n",
|
||||
"To get started, use the following steps:\n",
|
||||
"1. If you don't already have one, [Sign up](https://www.vectara.com/integrations/langchain) for your free Vectara trial.\n",
|
||||
"2. Within your account you can create one or more corpora. Each corpus represents an area that stores text data upon ingest from input documents. To create a corpus, use the **\"Create Corpus\"** button. You then provide a name to your corpus as well as a description. Optionally you can define filtering attributes and apply some advanced options. If you click on your created corpus, you can see its name and corpus ID right on the top.\n",
|
||||
"3. Next you'll need to create API keys to access the corpus. Click on the **\"Access Control\"** tab in the corpus view and then the **\"Create API Key\"** button. Give your key a name, and choose whether you want query-only or query+index for your key. Click \"Create\" and you now have an active API key. Keep this key confidential. \n",
|
||||
"2. Within your account, you can create one or more corpora. Each corpus represents an area that stores text data upon ingestion from input documents. To create a corpus, use the **\"Create Corpus\"** button. You then provide a name to your corpus as well as a description. Optionally, you can define filtering attributes and apply some advanced options. If you click on your created corpus, you can see its name and corpus ID right on the top.\n",
|
||||
"3. Next, you'll need to create API keys to access the corpus. Click on the **\"Access Control\"** tab in the corpus view and then the **\"Create API Key\"** button. Give your key a name, and choose whether you want query-only or query+index for your key. Click \"Create\", and you now have an active API key. Keep this key confidential. \n",
|
||||
"\n",
|
||||
"To use LangChain with Vectara, you'll need to have these two values: `corpus_key` and `api_key`.\n",
|
||||
"You can provide `VECTARA_API_KEY` to LangChain in two ways:\n",
|
||||
@@ -75,7 +73,7 @@
|
||||
")\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"In this notebook we assume they are provided in the environment."
|
||||
"In this notebook, we assume they are provided in the environment."
|
||||
]
|
||||
},
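For instance, a minimal sketch of providing the key via the environment; the placeholder value is illustrative:

```python
import os

# Illustrative placeholder; substitute your real Vectara API key.
# The corpus_key is typically passed to the vectorstore directly.
os.environ["VECTARA_API_KEY"] = "<your-vectara-api-key>"
```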
|
||||
{
|
||||
@@ -111,9 +109,9 @@
|
||||
"\n",
|
||||
"In most uses of LangChain to create chatbots, one must integrate a special `memory` component that maintains the history of chat sessions and then uses that history to ensure the chatbot is aware of conversation history.\n",
|
||||
"\n",
|
||||
"With Vectara Chat - all of that is performed in the backend by Vectara automatically. You can look at the [Chat](https://docs.vectara.com/docs/api-reference/chat-apis/chat-apis-overview) documentation for the details, to learn more about the internals of how this is implemented, but with LangChain all you have to do is turn that feature on in the Vectara vectorstore.\n",
|
||||
"With Vectara Chat, all of that is performed in the backend by Vectara automatically. You can look at the [Chat](https://docs.vectara.com/docs/api-reference/chat-apis/chat-apis-overview) documentation for the details, to learn more about the internals of how this is implemented, but with LangChain, all you have to do is turn that feature on in the Vectara vectorstore.\n",
|
||||
"\n",
|
||||
"Let's see an example. First we load the SOTU document (remember, text extraction and chunking all occurs automatically on the Vectara platform):"
|
||||
"Let's see an example. First, we load the SOTU document (remember, text extraction and chunking all occur automatically on the Vectara platform):"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -243,8 +241,8 @@
|
||||
"source": [
|
||||
"## Chat with streaming\n",
|
||||
"\n",
|
||||
"Of course the chatbot interface also supports streaming.\n",
|
||||
"Instead of the `invoke` method you simply use `stream`:"
|
||||
"Of course, the chatbot interface also supports streaming.\n",
|
||||
"Instead of the `invoke` method, you simply use `stream`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -281,12 +279,15 @@
|
||||
"cell_type": "markdown",
|
||||
"id": "cefdf72b1d90085a",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"For additional capabilities you can use chaining."
|
||||
"For additional capabilities, you can use chaining."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -346,7 +347,10 @@
|
||||
"cell_type": "markdown",
|
||||
"id": "3b8bb761-db4a-436c-8939-41e9f8652083",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
@@ -371,7 +375,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.0"
|
||||
"version": "3.12.10"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
"vLLM can be deployed as a server that mimics the OpenAI API protocol. This allows vLLM to be used as a drop-in replacement for applications using OpenAI API. This server can be queried in the same format as OpenAI API.\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"This will help you getting started with vLLM [chat models](/docs/concepts/chat_models), which leverage the `langchain-openai` package. For detailed documentation of all `ChatOpenAI` features and configurations head to the [API reference](https://python.langchain.com/api_reference/openai/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html).\n",
|
||||
"This will help you get started with vLLM [chat models](/docs/concepts/chat_models), which leverages the `langchain-openai` package. For detailed documentation of all `ChatOpenAI` features and configurations head to the [API reference](https://python.langchain.com/api_reference/openai/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html).\n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
@@ -29,7 +29,7 @@
|
||||
"| [ChatOpenAI](https://python.langchain.com/api_reference/openai/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) | [langchain_openai](https://python.langchain.com/api_reference/openai/) | ✅ | beta | ❌ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"Specific model features-- such as tool calling, support for multi-modal inputs, support for token-level streaming, etc.-- will depend on the hosted model.\n",
|
||||
"Specific model features, such as tool calling, support for multi-modal inputs, support for token-level streaming, etc., will depend on the hosted model.\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Volc Enging Maas\n",
|
||||
"sidebar_label: Volc Engine Maas\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -303,7 +303,7 @@
|
||||
"source": [
|
||||
"### A note on tool binding\n",
|
||||
"\n",
|
||||
"The `ChatWriter.bind_tools()` method does not create new instance with bound tools, but stores the received `tools` and `tool_choice` in the initial class instance attributes to pass them as parameters during the Palmyra LLM call while using `ChatWriter` invocation. This approach allows the support of different tool types, e.g. `function` and `graph`. `Graph` is one of the remotely called Writer Palmyra tools. For further information visit our [docs](https://dev.writer.com/api-guides/knowledge-graph#knowledge-graph). \n",
|
||||
"The `ChatWriter.bind_tools()` method does not create a new instance with bound tools, but stores the received `tools` and `tool_choice` in the initial class instance attributes to pass them as parameters during the Palmyra LLM call while using `ChatWriter` invocation. This approach allows the support of different tool types, e.g. `function` and `graph`. `Graph` is one of the remotely called Writer Palmyra tools. For further information, visit our [docs](https://dev.writer.com/api-guides/knowledge-graph#knowledge-graph). \n",
|
||||
"\n",
|
||||
"For more information about tool usage in LangChain, visit the [LangChain tool calling documentation](https://python.langchain.com/docs/concepts/tool_calling/)."
|
||||
]
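A short sketch of the calling pattern this note implies; the `GetWeather` schema and model name are illustrative:

```python
from pydantic import BaseModel, Field

from langchain_writer import ChatWriter


class GetWeather(BaseModel):
    """Get the current weather in a given location."""

    location: str = Field(..., description="City and state, e.g. San Francisco, CA")


chat = ChatWriter(model="palmyra-x5")  # illustrative model name

# Per the note above, this stores the tools on the existing instance
# rather than returning a separately configured copy.
chat.bind_tools([GetWeather], tool_choice="auto")

response = chat.invoke("What's the weather in Paris?")
print(response.tool_calls)
```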
|
||||
@@ -373,7 +373,7 @@
|
||||
"source": [
|
||||
"## Prompt templates\n",
|
||||
"\n",
|
||||
"[Prompt templates](https://python.langchain.com/docs/concepts/prompt_templates/) help to translate user input and parameters into instructions for a language model. You can use `ChatWriter` with a prompt templates like so:\n"
|
||||
"[Prompt templates](https://python.langchain.com/docs/concepts/prompt_templates/) help to translate user input and parameters into instructions for a language model. You can use `ChatWriter` with a prompt template like so:\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -411,7 +411,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"For detailed documentation of all ChatWriter features and configurations head to the [API reference](https://python.langchain.com/api_reference/writer/chat_models/langchain_writer.chat_models.ChatWriter.html#langchain_writer.chat_models.ChatWriter).\n",
|
||||
"For detailed documentation of all ChatWriter features and configurations, head to the [API reference](https://python.langchain.com/api_reference/writer/chat_models/langchain_writer.chat_models.ChatWriter.html#langchain_writer.chat_models.ChatWriter).\n",
|
||||
"\n",
|
||||
"## Additional resources\n",
|
||||
"You can find information about Writer's models (including costs, context windows, and supported input types) and tools in the [Writer docs](https://dev.writer.com/home)."
|
||||
|
||||
@@ -1,330 +1,373 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: xAI\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e49f1e0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ChatXAI\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"This page will help you get started with xAI [chat models](../../concepts/chat_models.mdx). For detailed documentation of all `ChatXAI` features and configurations head to the [API reference](https://python.langchain.com/api_reference/xai/chat_models/langchain_xai.chat_models.ChatXAI.html).\n",
|
||||
"\n",
|
||||
"[xAI](https://console.x.ai/) offers an API to interact with Grok models.\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/xai) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ChatXAI](https://python.langchain.com/api_reference/xai/chat_models/langchain_xai.chat_models.ChatXAI.html) | [langchain-xai](https://python.langchain.com/api_reference/xai/index.html) | ❌ | beta | ✅ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](../../how_to/tool_calling.ipynb) | [Structured output](../../how_to/structured_output.ipynb) | JSON mode | [Image input](../../how_to/multimodal_inputs.ipynb) | Audio input | Video input | [Token-level streaming](../../how_to/chat_streaming.ipynb) | Native async | [Token usage](../../how_to/chat_token_usage_tracking.ipynb) | [Logprobs](../../how_to/logprobs.ipynb) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | ✅ | ✅ |\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access xAI models you'll need to create an xAI account, get an API key, and install the `langchain-xai` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"Head to [this page](https://console.x.ai/) to sign up for xAI and generate an API key. Once you've done this set the `XAI_API_KEY` environment variable:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if \"XAI_API_KEY\" not in os.environ:\n",
|
||||
" os.environ[\"XAI_API_KEY\"] = getpass.getpass(\"Enter your xAI API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
|
||||
"metadata": {},
|
||||
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain xAI integration lives in the `langchain-xai` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-xai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_xai import ChatXAI\n",
|
||||
"\n",
|
||||
"llm = ChatXAI(\n",
|
||||
" model=\"grok-beta\",\n",
|
||||
" temperature=0,\n",
|
||||
" max_tokens=None,\n",
|
||||
" timeout=None,\n",
|
||||
" max_retries=2,\n",
|
||||
" # other params...\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b4f3e15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"J'adore programmer.\", additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 30, 'total_tokens': 36, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'grok-beta', 'system_fingerprint': 'fp_14b89b2dfc', 'finish_reason': 'stop', 'logprobs': None}, id='run-adffb7a3-e48a-4f52-b694-340d85abe5c3-0', usage_metadata={'input_tokens': 30, 'output_tokens': 6, 'total_tokens': 36, 'input_token_details': {}, 'output_token_details': {}})"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"I love programming.\"),\n",
|
||||
"]\n",
|
||||
"ai_msg = llm.invoke(messages)\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"J'adore programmer.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(ai_msg.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can [chain](../../how_to/sequence.ipynb) our model with a prompt template like so:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Ich liebe das Programmieren.', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 7, 'prompt_tokens': 25, 'total_tokens': 32, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'grok-beta', 'system_fingerprint': 'fp_14b89b2dfc', 'finish_reason': 'stop', 'logprobs': None}, id='run-569fc8dc-101b-4e6d-864e-d4fa80df2b63-0', usage_metadata={'input_tokens': 25, 'output_tokens': 7, 'total_tokens': 32, 'input_token_details': {}, 'output_token_details': {}})"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e074bce1-0994-4b83-b393-ae7aa7e21750",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Tool calling\n",
|
||||
"\n",
|
||||
"ChatXAI has a [tool calling](https://docs.x.ai/docs#capabilities) (we use \"tool calling\" and \"function calling\" interchangeably here) API that lets you describe tools and their arguments, and have the model return a JSON object with a tool to invoke and the inputs to that tool. Tool-calling is extremely useful for building tool-using chains and agents, and for getting structured outputs from models more generally.\n",
|
||||
"\n",
|
||||
"### ChatXAI.bind_tools()\n",
|
||||
"\n",
|
||||
"With `ChatXAI.bind_tools`, we can easily pass in Pydantic classes, dict schemas, LangChain tools, or even functions as tools to the model. Under the hood these are converted to an OpenAI tool schemas, which looks like:\n",
|
||||
"```\n",
|
||||
"{\n",
|
||||
" \"name\": \"...\",\n",
|
||||
" \"description\": \"...\",\n",
|
||||
" \"parameters\": {...} # JSONSchema\n",
|
||||
"}\n",
|
||||
"```\n",
|
||||
"and passed in every model invocation."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "c6bfe929-ec02-46bd-9d54-76350edddabc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from pydantic import BaseModel, Field\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class GetWeather(BaseModel):\n",
|
||||
" \"\"\"Get the current weather in a given location\"\"\"\n",
|
||||
"\n",
|
||||
" location: str = Field(..., description=\"The city and state, e.g. San Francisco, CA\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"llm_with_tools = llm.bind_tools([GetWeather])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "5265c892-d8c2-48af-aef5-adbee1647ba6",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='I am retrieving the current weather for San Francisco.', additional_kwargs={'tool_calls': [{'id': '0', 'function': {'arguments': '{\"location\":\"San Francisco, CA\"}', 'name': 'GetWeather'}, 'type': 'function'}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 151, 'total_tokens': 162, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'grok-beta', 'system_fingerprint': 'fp_14b89b2dfc', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-73707da7-afec-4a52-bee1-a176b0ab8585-0', tool_calls=[{'name': 'GetWeather', 'args': {'location': 'San Francisco, CA'}, 'id': '0', 'type': 'tool_call'}], usage_metadata={'input_tokens': 151, 'output_tokens': 11, 'total_tokens': 162, 'input_token_details': {}, 'output_token_details': {}})"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"ai_msg = llm_with_tools.invoke(\n",
|
||||
" \"what is the weather like in San Francisco\",\n",
|
||||
")\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all `ChatXAI` features and configurations head to the API reference: https://python.langchain.com/api_reference/xai/chat_models/langchain_xai.chat_models.ChatXAI.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: xAI\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
{
"cell_type": "markdown",
"id": "e49f1e0d",
"metadata": {},
"source": [
"# ChatXAI\n",
"\n",
"\n",
"This page will help you get started with xAI [chat models](../../concepts/chat_models.mdx). For detailed documentation of all `ChatXAI` features and configurations, head to the [API reference](https://python.langchain.com/api_reference/xai/chat_models/langchain_xai.chat_models.ChatXAI.html).\n",
"\n",
"[xAI](https://console.x.ai/) offers an API to interact with Grok models.\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/xai) | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
"| [ChatXAI](https://python.langchain.com/api_reference/xai/chat_models/langchain_xai.chat_models.ChatXAI.html) | [langchain-xai](https://python.langchain.com/api_reference/xai/index.html) | ❌ | beta | ✅ |  |  |\n",
"\n",
"### Model features\n",
"| [Tool calling](../../how_to/tool_calling.ipynb) | [Structured output](../../how_to/structured_output.ipynb) | JSON mode | [Image input](../../how_to/multimodal_inputs.ipynb) | Audio input | Video input | [Token-level streaming](../../how_to/chat_streaming.ipynb) | Native async | [Token usage](../../how_to/chat_token_usage_tracking.ipynb) | [Logprobs](../../how_to/logprobs.ipynb) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | ✅ | ✅ |\n",
"\n",
"## Setup\n",
"\n",
"To access xAI models, you'll need to create an xAI account, get an API key, and install the `langchain-xai` integration package.\n",
"\n",
"### Credentials\n",
"\n",
"Head to [this page](https://console.x.ai/) to sign up for xAI and generate an API key. Once you've done this, set the `XAI_API_KEY` environment variable:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"if \"XAI_API_KEY\" not in os.environ:\n",
"    os.environ[\"XAI_API_KEY\"] = getpass.getpass(\"Enter your xAI API key: \")"
]
},
{
"cell_type": "markdown",
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
"metadata": {},
"source": [
"To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
]
},
{
"cell_type": "markdown",
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"The LangChain xAI integration lives in the `langchain-xai` package:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-xai"
]
},
{
"cell_type": "markdown",
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our model object and generate chat completions:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
"metadata": {},
"outputs": [],
"source": [
"from langchain_xai import ChatXAI\n",
"\n",
"llm = ChatXAI(\n",
"    model=\"grok-beta\",\n",
"    temperature=0,\n",
"    max_tokens=None,\n",
"    timeout=None,\n",
"    max_retries=2,\n",
"    # other params...\n",
")"
]
},
{
"cell_type": "markdown",
"id": "2b4f3e15",
"metadata": {},
"source": [
"## Invocation"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "62e0dbc3",
"metadata": {
"tags": []
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"J'adore programmer.\", additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 30, 'total_tokens': 36, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'grok-beta', 'system_fingerprint': 'fp_14b89b2dfc', 'finish_reason': 'stop', 'logprobs': None}, id='run-adffb7a3-e48a-4f52-b694-340d85abe5c3-0', usage_metadata={'input_tokens': 30, 'output_tokens': 6, 'total_tokens': 36, 'input_token_details': {}, 'output_token_details': {}})"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"messages = [\n",
"    (\n",
"        \"system\",\n",
"        \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
"    ),\n",
"    (\"human\", \"I love programming.\"),\n",
"]\n",
"ai_msg = llm.invoke(messages)\n",
"ai_msg"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"J'adore programmer.\n"
]
}
],
"source": [
"print(ai_msg.content)"
]
},
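{
"cell_type": "markdown",
"id": "streaming-note",
"metadata": {},
"source": [
"`ChatXAI` also supports token-level streaming, and each `AIMessage` carries token counts in `usage_metadata` (visible in the output above). A minimal sketch reusing the `llm` and `messages` defined above (outputs not shown):\n",
"\n",
"```python\n",
"# Stream tokens as they are generated\n",
"for chunk in llm.stream(messages):\n",
"    print(chunk.content, end=\"\")\n",
"\n",
"# Inspect token usage of the earlier non-streaming call\n",
"print(ai_msg.usage_metadata)\n",
"```"
]
},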
{
"cell_type": "markdown",
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
"metadata": {},
"source": [
"## Chaining\n",
"\n",
"We can [chain](../../how_to/sequence.ipynb) our model with a prompt template like so:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Ich liebe das Programmieren.', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 7, 'prompt_tokens': 25, 'total_tokens': 32, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'grok-beta', 'system_fingerprint': 'fp_14b89b2dfc', 'finish_reason': 'stop', 'logprobs': None}, id='run-569fc8dc-101b-4e6d-864e-d4fa80df2b63-0', usage_metadata={'input_tokens': 25, 'output_tokens': 7, 'total_tokens': 32, 'input_token_details': {}, 'output_token_details': {}})"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
"    [\n",
"        (\n",
"            \"system\",\n",
"            \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
"        ),\n",
"        (\"human\", \"{input}\"),\n",
"    ]\n",
")\n",
"\n",
"chain = prompt | llm\n",
"chain.invoke(\n",
"    {\n",
"        \"input_language\": \"English\",\n",
"        \"output_language\": \"German\",\n",
"        \"input\": \"I love programming.\",\n",
"    }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "e074bce1-0994-4b83-b393-ae7aa7e21750",
"metadata": {},
"source": [
"## Tool calling\n",
"\n",
"ChatXAI has a [tool calling](https://docs.x.ai/docs#capabilities) API (we use \"tool calling\" and \"function calling\" interchangeably here) that lets you describe tools and their arguments, and have the model return a JSON object with a tool to invoke and the inputs to that tool. Tool calling is extremely useful for building tool-using chains and agents, and for getting structured outputs from models more generally.\n",
"\n",
"### ChatXAI.bind_tools()\n",
"\n",
"With `ChatXAI.bind_tools`, we can easily pass in Pydantic classes, dict schemas, LangChain tools, or even functions as tools to the model. Under the hood, these are converted to an OpenAI tool schema, which looks like:\n",
"```\n",
"{\n",
"    \"name\": \"...\",\n",
"    \"description\": \"...\",\n",
"    \"parameters\": {...}  # JSONSchema\n",
"}\n",
"```\n",
"and passed in every model invocation."
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "c6bfe929-ec02-46bd-9d54-76350edddabc",
"metadata": {},
"outputs": [],
"source": [
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class GetWeather(BaseModel):\n",
"    \"\"\"Get the current weather in a given location\"\"\"\n",
"\n",
"    location: str = Field(..., description=\"The city and state, e.g. San Francisco, CA\")\n",
"\n",
"\n",
"llm_with_tools = llm.bind_tools([GetWeather])"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "5265c892-d8c2-48af-aef5-adbee1647ba6",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='I am retrieving the current weather for San Francisco.', additional_kwargs={'tool_calls': [{'id': '0', 'function': {'arguments': '{\"location\":\"San Francisco, CA\"}', 'name': 'GetWeather'}, 'type': 'function'}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 151, 'total_tokens': 162, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'grok-beta', 'system_fingerprint': 'fp_14b89b2dfc', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-73707da7-afec-4a52-bee1-a176b0ab8585-0', tool_calls=[{'name': 'GetWeather', 'args': {'location': 'San Francisco, CA'}, 'id': '0', 'type': 'tool_call'}], usage_metadata={'input_tokens': 151, 'output_tokens': 11, 'total_tokens': 162, 'input_token_details': {}, 'output_token_details': {}})"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"ai_msg = llm_with_tools.invoke(\n",
"    \"what is the weather like in San Francisco\",\n",
")\n",
"ai_msg"
]
},
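{
"cell_type": "markdown",
"id": "tool-call-round-trip-note",
"metadata": {},
"source": [
"Note that the model responded with a tool call rather than a final answer; the parsed call is exposed in `ai_msg.tool_calls`. To complete the loop, your code executes the tool and passes the result back as a `ToolMessage`. A minimal sketch (the weather value is a stand-in; a real application would query a weather API):\n",
"\n",
"```python\n",
"from langchain_core.messages import HumanMessage, ToolMessage\n",
"\n",
"tool_call = ai_msg.tool_calls[0]\n",
"\n",
"messages = [\n",
"    HumanMessage(\"what is the weather like in San Francisco\"),\n",
"    ai_msg,\n",
"    ToolMessage(content=\"58°F, partly cloudy\", tool_call_id=tool_call[\"id\"]),\n",
"]\n",
"llm_with_tools.invoke(messages)  # the model can now answer in natural language\n",
"```"
]
},
{
"cell_type": "markdown",
"id": "structured-output-note",
"metadata": {},
"source": [
"The same machinery powers structured output. A minimal sketch using `with_structured_output`, available on `ChatXAI` as on other LangChain chat models (reusing the illustrative `GetWeather` schema from above):\n",
"\n",
"```python\n",
"structured_llm = llm.with_structured_output(GetWeather)\n",
"structured_llm.invoke(\"what is the weather like in San Francisco\")\n",
"# -> GetWeather(location='San Francisco, CA')  (illustrative output)\n",
"```"
]
},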
{
"cell_type": "markdown",
"id": "00297c44-9bd6-4f1f-b364-2a7ff77090fd",
"metadata": {},
"source": [
"## Live Search\n",
"\n",
"xAI supports a [Live Search](https://docs.x.ai/docs/guides/live-search) feature that enables Grok to ground its answers using results from web searches:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d39cd1a4-80f2-48b2-8a74-3856d7706973",
"metadata": {},
"outputs": [],
"source": [
"from langchain_xai import ChatXAI\n",
"\n",
"llm = ChatXAI(\n",
"    model=\"grok-3-latest\",\n",
"    search_parameters={\n",
"        \"mode\": \"auto\",\n",
"        # Example optional parameters below:\n",
"        \"max_search_results\": 3,\n",
"        \"from_date\": \"2025-05-26\",\n",
"        \"to_date\": \"2025-05-27\",\n",
"    },\n",
")\n",
"\n",
"llm.invoke(\"Provide me a digest of world news in the last 24 hours.\")"
]
},
{
"cell_type": "markdown",
"id": "cc62dc4d-e3ce-4b8b-8b94-d3e2e1a48bd1",
"metadata": {},
"source": [
"See [xAI docs](https://docs.x.ai/docs/guides/live-search) for the full set of web search options."
]
},
{
"cell_type": "markdown",
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all `ChatXAI` features and configurations, head to the API reference: https://python.langchain.com/api_reference/xai/chat_models/langchain_xai.chat_models.ChatXAI.html"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@@ -6,7 +6,7 @@
 "source": [
 "# ChatYI\n",
 "\n",
-"This will help you getting started with Yi [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatYi features and configurations head to the [API reference](https://python.langchain.com/api_reference/lanchain_community/chat_models/lanchain_community.chat_models.yi.ChatYi.html).\n",
+"This will help you get started with Yi [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatYi features and configurations head to the [API reference](https://python.langchain.com/api_reference/lanchain_community/chat_models/lanchain_community.chat_models.yi.ChatYi.html).\n",
 "\n",
 "[01.AI](https://www.lingyiwanwu.com/en), founded by Dr. Kai-Fu Lee, is a global company at the forefront of AI 2.0. They offer cutting-edge large language models, including the Yi series, which range from 6B to hundreds of billions of parameters. 01.AI also provides multimodal models, an open API platform, and open-source options like Yi-34B/9B/6B and Yi-VL.\n",
 "\n",
@@ -29,7 +29,7 @@
 "\n",
 "### Credentials\n",
 "\n",
-"Head to [01.AI](https://platform.01.ai) to sign up to 01.AI and generate an API key. Once you've done this set the `YI_API_KEY` environment variable:"
+"Head to [01.AI](https://platform.01.ai) to sign up for 01.AI and generate an API key. Once you've done this set the `YI_API_KEY` environment variable:"
 ]
 },
 {
@@ -197,7 +197,7 @@
 "source": [
 "## API reference\n",
 "\n",
-"For detailed documentation of all ChatYi features and configurations head to the API reference: https://python.langchain.com/api_reference/community/chat_models/langchain_community.chat_models.yi.ChatYi.html"
+"For detailed documentation of all ChatYi features and configurations, head to the API reference: https://python.langchain.com/api_reference/community/chat_models/langchain_community.chat_models.yi.ChatYi.html"
 ]
 }
 ],

@@ -56,7 +56,7 @@
 "metadata": {},
 "source": [
 "### Setting Up Your API Key\n",
-"Sign in to [ZHIPU AI](https://open.bigmodel.cn/login?redirect=%2Fusercenter%2Fapikeys) for the an API Key to access our models."
+"Sign in to [ZHIPU AI](https://open.bigmodel.cn/login?redirect=%2Fusercenter%2Fapikeys) for an API Key to access our models."
 ]
 },
 {

@@ -7,7 +7,7 @@
 "source": [
 "# Facebook Messenger\n",
 "\n",
-"This notebook shows how to load data from Facebook in a format you can fine-tune on. The overall steps are:\n",
+"This notebook shows how to load data from Facebook into a format you can fine-tune on. The overall steps are:\n",
 "\n",
 "1. Download your messenger data to disk.\n",
 "2. Create the Chat Loader and call `loader.load()` (or `loader.lazy_load()`) to perform the conversion.\n",
@@ -25,7 +25,7 @@
 "\n",
 "## 1. Download Data\n",
 "\n",
-"To download your own messenger data, following instructions [here](https://www.zapptales.com/en/download-facebook-messenger-chat-history-how-to/). IMPORTANT - make sure to download them in JSON format (not HTML).\n",
+"To download your own messenger data, follow the instructions [here](https://www.zapptales.com/en/download-facebook-messenger-chat-history-how-to/). IMPORTANT - make sure to download them in JSON format (not HTML).\n",
 "\n",
 "We are hosting an example dump at [this google drive link](https://drive.google.com/file/d/1rh1s1o2i7B-Sk1v9o8KNgivLVGwJ-osV/view?usp=sharing) that we will use in this walkthrough."
 ]

@@ -70,7 +70,7 @@
 "source": [
 "## 2. Create the Chat Loader\n",
 "\n",
-"Provide the loader with the file path to the zip directory. You can optionally specify the user id that maps to an ai message as well an configure whether to merge message runs."
+"Provide the loader with the file path to the zip directory. You can optionally specify the user id that maps to an ai message as well as configure whether to merge message runs."
 ]
 },
 {
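
The Facebook Messenger hunks above describe the load-and-convert flow. As a minimal sketch of that flow, assuming the example dump has been unzipped to ./facebook_data (the path and sender name are illustrative):

```python
from langchain_community.chat_loaders.facebook_messenger import (
    FolderFacebookMessengerChatLoader,
)
from langchain_community.chat_loaders.utils import map_ai_messages, merge_chat_runs

# Point the loader at the unzipped Messenger export
loader = FolderFacebookMessengerChatLoader(path="./facebook_data")
chat_sessions = loader.load()

# Merge consecutive messages from the same sender, then treat one
# participant's messages as AI messages for fine-tuning
merged = merge_chat_runs(chat_sessions)
training_sessions = list(map_ai_messages(merged, sender="Your Name"))
```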

@@ -7,7 +7,7 @@
 "source": [
 "# LangSmith Chat Datasets\n",
 "\n",
-"This notebook demonstrates an easy way to load a LangSmith chat dataset fine-tune a model on that data.\n",
+"This notebook demonstrates an easy way to load a LangSmith chat dataset and fine-tune a model on that data.\n",
 "The process is simple and comprises 3 steps.\n",
 "\n",
 "1. Create the chat dataset.\n",

@@ -16,7 +16,7 @@
 "\n",
 "## 1. Create message dump\n",
 "\n",
-"Currently (2023/08/23) this loader best supports a zip directory of files in the format generated by exporting your a direct message conversation from Slack. Follow up-to-date instructions from slack on how to do so.\n",
+"Currently (2023/08/23), this loader best supports a zip directory of files in the format generated by exporting your a direct message conversation from Slack. Follow the up-to-date instructions from slack on how to do so.\n",
 "\n",
 "We have an example in the LangChain repo."
 ]
@@ -43,7 +43,7 @@
 "source": [
 "## 2. Create the Chat Loader\n",
 "\n",
-"Provide the loader with the file path to the zip directory. You can optionally specify the user id that maps to an ai message as well an configure whether to merge message runs."
+"Provide the loader with the file path to the zip directory. You can optionally specify the user id that maps to an ai message as well as configure whether to merge message runs."
 ]
 },
 {

@@ -10,7 +10,7 @@
 "This notebook shows how to use the Telegram chat loader. This class helps map exported Telegram conversations to LangChain chat messages.\n",
 "\n",
 "The process has three steps:\n",
-"1. Export the chat .txt file by copying chats from the Telegram app and pasting them in a file on your local computer\n",
+"1. Export the chat .txt file by copying chats from the Telegram app and pasting them in a file on your local computer\n",
 "2. Create the `TelegramChatLoader` with the file path pointed to the json file or directory of JSON files\n",
 "3. Call `loader.load()` (or `loader.lazy_load()`) to perform the conversion. Optionally use `merge_chat_runs` to combine message from the same sender in sequence, and/or `map_ai_messages` to convert messages from the specified sender to the \"AIMessage\" class.\n",
 "\n",
@@ -92,7 +92,7 @@
 "source": [
 "## 2. Create the Chat Loader\n",
 "\n",
-"All that's required is the file path. You can optionally specify the user name that maps to an ai message as well an configure whether to merge message runs."
+"All that's required is the file path. You can optionally specify the user name that maps to an ai message as well as configure whether to merge message runs."
 ]
 },
 {
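
The Telegram loader follows the same pattern as the other chat loaders. A minimal sketch, with an illustrative file path and sender name:

```python
from langchain_community.chat_loaders.telegram import TelegramChatLoader
from langchain_community.chat_loaders.utils import map_ai_messages, merge_chat_runs

loader = TelegramChatLoader(path="./telegram_conversation.json")

raw_messages = loader.lazy_load()
# Combine consecutive messages from the same sender into one message
merged_messages = merge_chat_runs(raw_messages)
# Convert the named sender's messages to AIMessage instances
messages = list(map_ai_messages(merged_messages, sender="Jiminy Cricket"))
```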

@@ -13,7 +13,7 @@
 "\n",
 "\n",
 "The process has five steps:\n",
-"1. Open your chat in the WeChat desktop app. Select messages you need by mouse-dragging or right-click. Due to restrictions, you can select up to 100 messages once a time. `CMD`/`Ctrl` + `C` to copy.\n",
+"1. Open your chat in the WeChat desktop app. Select messages you need by mouse-dragging or right-click. Due to restrictions, you can select up to 100 messages at a time. `CMD`/`Ctrl` + `C` to copy.\n",
 "2. Create the chat .txt file by pasting selected messages in a file on your local computer.\n",
 "3. Copy the chat loader definition from below to a local file.\n",
 "4. Initialize the `WeChatChatLoader` with the file path pointed to the text file.\n",

@@ -152,7 +152,7 @@
 "id": "4a93dc2a",
 "metadata": {},
 "source": [
-"As `load` returns a list, it will block until all documents are loaded. To have better control over this process, you can also you the `lazy_load` method which returns an iterator instead:"
+"As `load` returns a list, it will block until all documents are loaded. To have better control over this process, you can also use the `lazy_load` method which returns an iterator instead:"
 ]
 },
 {
@@ -170,7 +170,7 @@
 "id": "3a124086",
 "metadata": {},
 "source": [
-"Keep in mind that by default the page content is empty and the metadata object contains all the information from the record. To create documents in a different, pass in a record_handler function when creating the loader:"
+"Keep in mind that by default the page content is empty and the metadata object contains all the information from the record. To create documents in a different way, pass in a record_handler function when creating the loader:"
 ]
 },
 {
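
The record_handler mentioned in the last hunk follows a common pattern in these document loaders: a callable that receives each raw record (plus an id) and returns the Document to emit. The excerpt does not name the loader, so this sketch only illustrates the shape of such a handler (field names are hypothetical):

```python
from langchain_core.documents import Document


def record_handler(record: dict, id: str) -> Document:
    # Promote one field to page content; keep the full record as metadata
    return Document(page_content=record["title"], metadata=record)


# With any such loader, lazy_load() yields documents one at a time
# instead of blocking until the full list is built:
# for doc in loader.lazy_load():
#     process(doc)
```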

@@ -131,7 +131,7 @@
 "id": "4a93dc2a",
 "metadata": {},
 "source": [
-"As `load` returns a list, it will block until all documents are loaded. To have better control over this process, you can also you the `lazy_load` method which returns an iterator instead:"
+"As `load` returns a list, it will block until all documents are loaded. To have better control over this process, you can also use the `lazy_load` method which returns an iterator instead:"
 ]
 },
 {

Some files were not shown because too many files have changed in this diff.