mirror of
https://github.com/hwchase17/langchain.git
synced 2026-04-18 08:03:52 +00:00
Compare commits
145 Commits
cc/openai_
...
integratio
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
86e7d2ede2 | ||
|
|
e9ef2e980f | ||
|
|
1833378240 | ||
|
|
bb4391a2be | ||
|
|
e5f6239aed | ||
|
|
4130e6476b | ||
|
|
5d5b27f6ec | ||
|
|
45fa0ebd0a | ||
|
|
37190881d3 | ||
|
|
5ede4248ef | ||
|
|
5f812f5968 | ||
|
|
aae8306d6c | ||
|
|
83cfb9691f | ||
|
|
4d1d726e61 | ||
|
|
07cb41ea9e | ||
|
|
57ffacadd0 | ||
|
|
0ba03d8f3a | ||
|
|
f5a0092551 | ||
|
|
38b48d257d | ||
|
|
f949d9a3d3 | ||
|
|
9fb0db6937 | ||
|
|
168f1dfd93 | ||
|
|
f6cf2ce2ad | ||
|
|
2909b49045 | ||
|
|
958f85d541 | ||
|
|
36412c02b6 | ||
|
|
46d6bf0330 | ||
|
|
ff8ce60dcc | ||
|
|
251551ccf1 | ||
|
|
492b4c1604 | ||
|
|
5a6e1254a7 | ||
|
|
b91daf06eb | ||
|
|
f6a17fbc56 | ||
|
|
7ff7c4f81b | ||
|
|
036f00dc92 | ||
|
|
98a9ef19ec | ||
|
|
1985aaf095 | ||
|
|
789db7398b | ||
|
|
9b48e4c2b0 | ||
|
|
54eab796ab | ||
|
|
4c68749e38 | ||
|
|
531319f65b | ||
|
|
192035f8c0 | ||
|
|
a5eca20e1b | ||
|
|
620d723fbf | ||
|
|
2c8b8114fa | ||
|
|
e2ab4ccab3 | ||
|
|
5684653775 | ||
|
|
eb9b992aa6 | ||
|
|
d8510270ee | ||
|
|
747efa16ec | ||
|
|
c74e7b997d | ||
|
|
f54f14b747 | ||
|
|
ac22cde130 | ||
|
|
65a8f30729 | ||
|
|
18f9b5d8ab | ||
|
|
2c99f12062 | ||
|
|
0b80bec015 | ||
|
|
d5d0134e7b | ||
|
|
226f29bc96 | ||
|
|
f27e2d7ce7 | ||
|
|
bbd4b36d76 | ||
|
|
315bb17ef5 | ||
|
|
d0bfc7f820 | ||
|
|
5e0fa2cce5 | ||
|
|
733abcc884 | ||
|
|
e9c1765967 | ||
|
|
ebea5e014d | ||
|
|
5237987643 | ||
|
|
cd1ea8e94d | ||
|
|
49bdd3b6fe | ||
|
|
23fa70f328 | ||
|
|
c7842730ef | ||
|
|
81d1653a30 | ||
|
|
27d86d7bc8 | ||
|
|
70fc0b8363 | ||
|
|
62c570dd77 | ||
|
|
38420ee76e | ||
|
|
f896e701eb | ||
|
|
7b8f266039 | ||
|
|
aa6dae4a5b | ||
|
|
34e94755af | ||
|
|
67aff1648b | ||
|
|
b209d46eb3 | ||
|
|
df459d0d5e | ||
|
|
3823daa0b9 | ||
|
|
0d7cdf290b | ||
|
|
911accf733 | ||
|
|
b9746a6910 | ||
|
|
cee0fecb08 | ||
|
|
bac3a28e70 | ||
|
|
a7ab5e8372 | ||
|
|
6c05d4b153 | ||
|
|
1c993b921c | ||
|
|
9893e5cb80 | ||
|
|
88dc479c4a | ||
|
|
33a3510243 | ||
|
|
01317fde21 | ||
|
|
17507c9ba6 | ||
|
|
9e863c89d2 | ||
|
|
74e7772a5f | ||
|
|
3444e587ee | ||
|
|
3c258194ae | ||
|
|
34638ccfae | ||
|
|
4e5058f29c | ||
|
|
894fd63a61 | ||
|
|
806211475a | ||
|
|
d0f5bcda29 | ||
|
|
230876a7c5 | ||
|
|
5c7440c201 | ||
|
|
022ff9eead | ||
|
|
52b0570bec | ||
|
|
b3dc66f7a3 | ||
|
|
9b7b8e4a1a | ||
|
|
f0153414d5 | ||
|
|
1ee8aceaee | ||
|
|
c599ba47d5 | ||
|
|
417efa30a6 | ||
|
|
5f0102242a | ||
|
|
4710c1fa8c | ||
|
|
577c0d0715 | ||
|
|
ba5ddb218f | ||
|
|
9383a0536a | ||
|
|
fb16c25920 | ||
|
|
692a68bf1c | ||
|
|
484d945500 | ||
|
|
7eb6dde720 | ||
|
|
8575d7491f | ||
|
|
9a11e0edcd | ||
|
|
ccb64e9f4f | ||
|
|
33354f984f | ||
|
|
c7cd666a17 | ||
|
|
c671d54c6f | ||
|
|
eca8c5515d | ||
|
|
7c175e3fda | ||
|
|
3b066dc005 | ||
|
|
f8ed5007ea | ||
|
|
9e6ffd1264 | ||
|
|
86b364de3b | ||
|
|
a1897ca621 | ||
|
|
476cd26f57 | ||
|
|
f07338d2bf | ||
|
|
186cd7f1a1 | ||
|
|
46908ee3da | ||
|
|
0dbcc1d099 |
4
.github/CODEOWNERS
vendored
4
.github/CODEOWNERS
vendored
@@ -1,2 +1,2 @@
|
||||
/.github/ @efriis @baskaryan @ccurme
|
||||
/libs/packages.yml @efriis
|
||||
/.github/ @baskaryan @ccurme
|
||||
/libs/packages.yml @ccurme
|
||||
|
||||
2
.github/PULL_REQUEST_TEMPLATE.md
vendored
2
.github/PULL_REQUEST_TEMPLATE.md
vendored
@@ -26,4 +26,4 @@ Additional guidelines:
|
||||
- Changes should be backwards compatible.
|
||||
- If you are adding something to community, do not re-import it in langchain.
|
||||
|
||||
If no one reviews your PR within a few days, please @-mention one of baskaryan, efriis, eyurtsev, ccurme, vbarda, hwchase17.
|
||||
If no one reviews your PR within a few days, please @-mention one of baskaryan, eyurtsev, ccurme, vbarda, hwchase17.
|
||||
|
||||
1
.github/workflows/_integration_test.yml
vendored
1
.github/workflows/_integration_test.yml
vendored
@@ -6,6 +6,7 @@ on:
|
||||
working-directory:
|
||||
required: true
|
||||
type: string
|
||||
description: "From which folder this pipeline executes"
|
||||
python-version:
|
||||
required: true
|
||||
type: string
|
||||
|
||||
111
.github/workflows/_release.yml
vendored
111
.github/workflows/_release.yml
vendored
@@ -12,6 +12,7 @@ on:
|
||||
working-directory:
|
||||
required: true
|
||||
type: string
|
||||
description: "From which folder this pipeline executes"
|
||||
default: 'libs/langchain'
|
||||
dangerous-nonmaster-release:
|
||||
required: false
|
||||
@@ -100,15 +101,32 @@ jobs:
|
||||
PKG_NAME: ${{ needs.build.outputs.pkg-name }}
|
||||
VERSION: ${{ needs.build.outputs.version }}
|
||||
run: |
|
||||
PREV_TAG="$PKG_NAME==${VERSION%.*}.$(( ${VERSION##*.} - 1 ))"; [[ "${VERSION##*.}" -eq 0 ]] && PREV_TAG=""
|
||||
# Handle regular versions and pre-release versions differently
|
||||
if [[ "$VERSION" == *"-"* ]]; then
|
||||
# This is a pre-release version (contains a hyphen)
|
||||
# Extract the base version without the pre-release suffix
|
||||
BASE_VERSION=${VERSION%%-*}
|
||||
# Look for the latest release of the same base version
|
||||
REGEX="^$PKG_NAME==$BASE_VERSION\$"
|
||||
PREV_TAG=$(git tag --sort=-creatordate | (grep -P "$REGEX" || true) | head -1)
|
||||
|
||||
# If no exact base version match, look for the latest release of any kind
|
||||
if [ -z "$PREV_TAG" ]; then
|
||||
REGEX="^$PKG_NAME==\\d+\\.\\d+\\.\\d+\$"
|
||||
PREV_TAG=$(git tag --sort=-creatordate | (grep -P "$REGEX" || true) | head -1)
|
||||
fi
|
||||
else
|
||||
# Regular version handling
|
||||
PREV_TAG="$PKG_NAME==${VERSION%.*}.$(( ${VERSION##*.} - 1 ))"; [[ "${VERSION##*.}" -eq 0 ]] && PREV_TAG=""
|
||||
|
||||
# backup case if releasing e.g. 0.3.0, looks up last release
|
||||
# note if last release (chronologically) was e.g. 0.1.47 it will get
|
||||
# that instead of the last 0.2 release
|
||||
if [ -z "$PREV_TAG" ]; then
|
||||
REGEX="^$PKG_NAME==\\d+\\.\\d+\\.\\d+\$"
|
||||
echo $REGEX
|
||||
PREV_TAG=$(git tag --sort=-creatordate | (grep -P $REGEX || true) | head -1)
|
||||
# backup case if releasing e.g. 0.3.0, looks up last release
|
||||
# note if last release (chronologically) was e.g. 0.1.47 it will get
|
||||
# that instead of the last 0.2 release
|
||||
if [ -z "$PREV_TAG" ]; then
|
||||
REGEX="^$PKG_NAME==\\d+\\.\\d+\\.\\d+\$"
|
||||
echo $REGEX
|
||||
PREV_TAG=$(git tag --sort=-creatordate | (grep -P $REGEX || true) | head -1)
|
||||
fi
|
||||
fi
|
||||
|
||||
# if PREV_TAG is empty, let it be empty
|
||||
@@ -312,12 +330,89 @@ jobs:
|
||||
run: make integration_tests
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
|
||||
# Test select published packages against new core
|
||||
test-prior-published-packages-against-new-core:
|
||||
needs:
|
||||
- build
|
||||
- release-notes
|
||||
- test-pypi-publish
|
||||
- pre-release-checks
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
partner: [openai, anthropic]
|
||||
fail-fast: false # Continue testing other partners if one fails
|
||||
env:
|
||||
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
AZURE_OPENAI_API_VERSION: ${{ secrets.AZURE_OPENAI_API_VERSION }}
|
||||
AZURE_OPENAI_API_BASE: ${{ secrets.AZURE_OPENAI_API_BASE }}
|
||||
AZURE_OPENAI_API_KEY: ${{ secrets.AZURE_OPENAI_API_KEY }}
|
||||
AZURE_OPENAI_CHAT_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_CHAT_DEPLOYMENT_NAME }}
|
||||
AZURE_OPENAI_LEGACY_CHAT_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_LEGACY_CHAT_DEPLOYMENT_NAME }}
|
||||
AZURE_OPENAI_LLM_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_LLM_DEPLOYMENT_NAME }}
|
||||
AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
# We implement this conditional as Github Actions does not have good support
|
||||
# for conditionally needing steps. https://github.com/actions/runner/issues/491
|
||||
- name: Check if libs/core
|
||||
run: |
|
||||
if [ "${{ startsWith(inputs.working-directory, 'libs/core') }}" != "true" ]; then
|
||||
echo "Not in libs/core. Exiting successfully."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
- name: Set up Python + uv
|
||||
if: startsWith(inputs.working-directory, 'libs/core')
|
||||
uses: "./.github/actions/uv_setup"
|
||||
with:
|
||||
python-version: ${{ env.PYTHON_VERSION }}
|
||||
|
||||
- uses: actions/download-artifact@v4
|
||||
if: startsWith(inputs.working-directory, 'libs/core')
|
||||
with:
|
||||
name: dist
|
||||
path: ${{ inputs.working-directory }}/dist/
|
||||
|
||||
- name: Test against ${{ matrix.partner }}
|
||||
if: startsWith(inputs.working-directory, 'libs/core')
|
||||
run: |
|
||||
# Identify latest tag
|
||||
LATEST_PACKAGE_TAG="$(
|
||||
git ls-remote --tags origin "langchain-${{ matrix.partner }}*" \
|
||||
| awk '{print $2}' \
|
||||
| sed 's|refs/tags/||' \
|
||||
| sort -Vr \
|
||||
| head -n 1
|
||||
)"
|
||||
echo "Latest package tag: $LATEST_PACKAGE_TAG"
|
||||
|
||||
# Shallow-fetch just that single tag
|
||||
git fetch --depth=1 origin tag "$LATEST_PACKAGE_TAG"
|
||||
|
||||
# Checkout the latest package files
|
||||
rm -rf $GITHUB_WORKSPACE/libs/partners/${{ matrix.partner }}/*
|
||||
cd $GITHUB_WORKSPACE/libs/partners/${{ matrix.partner }}
|
||||
git checkout "$LATEST_PACKAGE_TAG" -- .
|
||||
|
||||
# Print as a sanity check
|
||||
echo "Version number from pyproject.toml: "
|
||||
cat pyproject.toml | grep "version = "
|
||||
|
||||
# Run tests
|
||||
uv sync --group test --group test_integration
|
||||
uv pip install ../../core/dist/*.whl
|
||||
make integration_tests
|
||||
|
||||
publish:
|
||||
needs:
|
||||
- build
|
||||
- release-notes
|
||||
- test-pypi-publish
|
||||
- pre-release-checks
|
||||
- test-prior-published-packages-against-new-core
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
# This permission is used for trusted publishing:
|
||||
|
||||
1
.github/workflows/check_diffs.yml
vendored
1
.github/workflows/check_diffs.yml
vendored
@@ -1,4 +1,3 @@
|
||||
---
|
||||
name: CI
|
||||
|
||||
on:
|
||||
|
||||
1
.github/workflows/check_new_docs.yml
vendored
1
.github/workflows/check_new_docs.yml
vendored
@@ -1,4 +1,3 @@
|
||||
---
|
||||
name: Integration docs lint
|
||||
|
||||
on:
|
||||
|
||||
1
.github/workflows/codespell.yml
vendored
1
.github/workflows/codespell.yml
vendored
@@ -1,4 +1,3 @@
|
||||
---
|
||||
name: CI / cd . / make spell_check
|
||||
|
||||
on:
|
||||
|
||||
@@ -1,12 +1,7 @@
|
||||
# Read the Docs configuration file
|
||||
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
|
||||
|
||||
# Required
|
||||
version: 2
|
||||
|
||||
formats:
|
||||
- pdf
|
||||
|
||||
# Set the version of Python and other tools you might need
|
||||
build:
|
||||
os: ubuntu-22.04
|
||||
@@ -15,15 +10,16 @@ build:
|
||||
commands:
|
||||
- mkdir -p $READTHEDOCS_OUTPUT
|
||||
- cp -r api_reference_build/* $READTHEDOCS_OUTPUT
|
||||
|
||||
# Build documentation in the docs/ directory with Sphinx
|
||||
sphinx:
|
||||
configuration: docs/api_reference/conf.py
|
||||
|
||||
# If using Sphinx, optionally build your docs in additional formats such as PDF
|
||||
# formats:
|
||||
# - pdf
|
||||
formats:
|
||||
- pdf
|
||||
|
||||
# Optionally declare the Python requirements required to build your docs
|
||||
python:
|
||||
install:
|
||||
- requirements: docs/api_reference/requirements.txt
|
||||
- requirements: docs/api_reference/requirements.txt
|
||||
|
||||
1
Makefile
1
Makefile
@@ -2,7 +2,6 @@
|
||||
|
||||
.EXPORT_ALL_VARIABLES:
|
||||
UV_FROZEN = true
|
||||
UV_NO_SYNC = true
|
||||
|
||||
## help: Show this help info.
|
||||
help: Makefile
|
||||
|
||||
176
README.md
176
README.md
@@ -1,6 +1,12 @@
|
||||
# 🦜️🔗 LangChain
|
||||
<picture>
|
||||
<source media="(prefers-color-scheme: light)" srcset="docs/static/img/logo-dark.svg">
|
||||
<source media="(prefers-color-scheme: dark)" srcset="docs/static/img/logo-light.svg">
|
||||
<img alt="LangChain Logo" src="docs/static/img/logo-dark.svg" width="80%">
|
||||
</picture>
|
||||
|
||||
⚡ Build context-aware reasoning applications ⚡
|
||||
<div>
|
||||
<br>
|
||||
</div>
|
||||
|
||||
[](https://github.com/langchain-ai/langchain/releases)
|
||||
[](https://github.com/langchain-ai/langchain/actions/workflows/check_diffs.yml)
|
||||
@@ -12,131 +18,65 @@
|
||||
[](https://codespaces.new/langchain-ai/langchain)
|
||||
[](https://twitter.com/langchainai)
|
||||
|
||||
Looking for the JS/TS library? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).
|
||||
> [!NOTE]
|
||||
> Looking for the JS/TS library? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).
|
||||
|
||||
To help you ship LangChain apps to production faster, check out [LangSmith](https://smith.langchain.com).
|
||||
[LangSmith](https://smith.langchain.com) is a unified developer platform for building, testing, and monitoring LLM applications.
|
||||
Fill out [this form](https://www.langchain.com/contact-sales) to speak with our sales team.
|
||||
|
||||
## Quick Install
|
||||
|
||||
With pip:
|
||||
LangChain is a framework for building LLM-powered applications. It helps you chain
|
||||
together interoperable components and third-party integrations to simplify AI
|
||||
application development — all while future-proofing decisions as the underlying
|
||||
technology evolves.
|
||||
|
||||
```bash
|
||||
pip install langchain
|
||||
pip install -U langchain
|
||||
```
|
||||
|
||||
With conda:
|
||||
To learn more about LangChain, check out
|
||||
[the docs](https://python.langchain.com/docs/introduction/). If you’re looking for more
|
||||
advanced customization or agent orchestration, check out
|
||||
[LangGraph](https://langchain-ai.github.io/langgraph/), our framework for building
|
||||
controllable agent workflows.
|
||||
|
||||
```bash
|
||||
conda install langchain -c conda-forge
|
||||
```
|
||||
## Why use LangChain?
|
||||
|
||||
## 🤔 What is LangChain?
|
||||
LangChain helps developers build applications powered by LLMs through a standard
|
||||
interface for models, embeddings, vector stores, and more.
|
||||
|
||||
**LangChain** is a framework for developing applications powered by large language models (LLMs).
|
||||
Use LangChain for:
|
||||
- **Real-time data augmentation**. Easily connect LLMs to diverse data sources and
|
||||
external / internal systems, drawing from LangChain’s vast library of integrations with
|
||||
model providers, tools, vector stores, retrievers, and more.
|
||||
- **Model interoperability**. Swap models in and out as your engineering team
|
||||
experiments to find the best choice for your application’s needs. As the industry
|
||||
frontier evolves, adapt quickly — LangChain’s abstractions keep you moving without
|
||||
losing momentum.
|
||||
|
||||
For these applications, LangChain simplifies the entire application lifecycle:
|
||||
## LangChain’s ecosystem
|
||||
While the LangChain framework can be used standalone, it also integrates seamlessly
|
||||
with any LangChain product, giving developers a full suite of tools when building LLM
|
||||
applications.
|
||||
|
||||
To improve your LLM application development, pair LangChain with:
|
||||
|
||||
- **Open-source libraries**: Build your applications using LangChain's open-source
|
||||
[components](https://python.langchain.com/docs/concepts/) and
|
||||
[third-party integrations](https://python.langchain.com/docs/integrations/providers/).
|
||||
Use [LangGraph](https://langchain-ai.github.io/langgraph/) to build stateful agents with first-class streaming and human-in-the-loop support.
|
||||
- **Productionization**: Inspect, monitor, and evaluate your apps with [LangSmith](https://docs.smith.langchain.com/) so that you can constantly optimize and deploy with confidence.
|
||||
- **Deployment**: Turn your LangGraph applications into production-ready APIs and Assistants with [LangGraph Platform](https://langchain-ai.github.io/langgraph/cloud/).
|
||||
- [LangSmith](http://www.langchain.com/langsmith) - Helpful for agent evals and
|
||||
observability. Debug poor-performing LLM app runs, evaluate agent trajectories, gain
|
||||
visibility in production, and improve performance over time.
|
||||
- [LangGraph](https://langchain-ai.github.io/langgraph/) - Build agents that can
|
||||
reliably handle complex tasks with LangGraph, our low-level agent orchestration
|
||||
framework. LangGraph offers customizable architecture, long-term memory, and
|
||||
human-in-the-loop workflows — and is trusted in production by companies like LinkedIn,
|
||||
Uber, Klarna, and GitLab.
|
||||
- [LangGraph Platform](https://langchain-ai.github.io/langgraph/concepts/#langgraph-platform) - Deploy
|
||||
and scale agents effortlessly with a purpose-built deployment platform for long
|
||||
running, stateful workflows. Discover, reuse, configure, and share agents across
|
||||
teams — and iterate quickly with visual prototyping in
|
||||
[LangGraph Studio](https://langchain-ai.github.io/langgraph/concepts/langgraph_studio/).
|
||||
|
||||
### Open-source libraries
|
||||
|
||||
- **`langchain-core`**: Base abstractions.
|
||||
- **Integration packages** (e.g. **`langchain-openai`**, **`langchain-anthropic`**, etc.): Important integrations have been split into lightweight packages that are co-maintained by the LangChain team and the integration developers.
|
||||
- **`langchain`**: Chains, agents, and retrieval strategies that make up an application's cognitive architecture.
|
||||
- **`langchain-community`**: Third-party integrations that are community maintained.
|
||||
- **[LangGraph](https://langchain-ai.github.io/langgraph)**: LangGraph powers production-grade agents, trusted by Linkedin, Uber, Klarna, GitLab, and many more. Build robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph. Integrates smoothly with LangChain, but can be used without it. To learn more about LangGraph, check out our first LangChain Academy course, *Introduction to LangGraph*, available [here](https://academy.langchain.com/courses/intro-to-langgraph).
|
||||
|
||||
### Productionization:
|
||||
|
||||
- **[LangSmith](https://docs.smith.langchain.com/)**: A developer platform that lets you debug, test, evaluate, and monitor chains built on any LLM framework and seamlessly integrates with LangChain.
|
||||
|
||||
### Deployment:
|
||||
|
||||
- **[LangGraph Platform](https://langchain-ai.github.io/langgraph/cloud/)**: Turn your LangGraph applications into production-ready APIs and Assistants.
|
||||
|
||||

|
||||

|
||||
|
||||
## 🧱 What can you build with LangChain?
|
||||
|
||||
**❓ Question answering with RAG**
|
||||
|
||||
- [Documentation](https://python.langchain.com/docs/tutorials/rag/)
|
||||
- End-to-end Example: [Chat LangChain](https://chat.langchain.com) and [repo](https://github.com/langchain-ai/chat-langchain)
|
||||
|
||||
**🧱 Extracting structured output**
|
||||
|
||||
- [Documentation](https://python.langchain.com/docs/tutorials/extraction/)
|
||||
- End-to-end Example: [LangChain Extract](https://github.com/langchain-ai/langchain-extract/)
|
||||
|
||||
**🤖 Chatbots**
|
||||
|
||||
- [Documentation](https://python.langchain.com/docs/tutorials/chatbot/)
|
||||
- End-to-end Example: [Web LangChain (web researcher chatbot)](https://weblangchain.vercel.app) and [repo](https://github.com/langchain-ai/weblangchain)
|
||||
|
||||
And much more! Head to the [Tutorials](https://python.langchain.com/docs/tutorials/) section of the docs for more.
|
||||
|
||||
## 🚀 How does LangChain help?
|
||||
|
||||
The main value props of the LangChain libraries are:
|
||||
|
||||
1. **Components**: composable building blocks, tools and integrations for working with language models. Components are modular and easy-to-use, whether you are using the rest of the LangChain framework or not.
|
||||
2. **Easy orchestration with LangGraph**: [LangGraph](https://langchain-ai.github.io/langgraph/),
|
||||
built on top of `langchain-core`, has built-in support for [messages](https://python.langchain.com/docs/concepts/messages/), [tools](https://python.langchain.com/docs/concepts/tools/),
|
||||
and other LangChain abstractions. This makes it easy to combine components into
|
||||
production-ready applications with persistence, streaming, and other key features.
|
||||
Check out the LangChain [tutorials page](https://python.langchain.com/docs/tutorials/#orchestration) for examples.
|
||||
|
||||
## Components
|
||||
|
||||
Components fall into the following **modules**:
|
||||
|
||||
**📃 Model I/O**
|
||||
|
||||
This includes [prompt management](https://python.langchain.com/docs/concepts/prompt_templates/)
|
||||
and a generic interface for [chat models](https://python.langchain.com/docs/concepts/chat_models/), including a consistent interface for [tool-calling](https://python.langchain.com/docs/concepts/tool_calling/) and [structured output](https://python.langchain.com/docs/concepts/structured_outputs/) across model providers.
|
||||
|
||||
**📚 Retrieval**
|
||||
|
||||
Retrieval Augmented Generation involves [loading data](https://python.langchain.com/docs/concepts/document_loaders/) from a variety of sources, [preparing it](https://python.langchain.com/docs/concepts/text_splitters/), then [searching over (a.k.a. retrieving from)](https://python.langchain.com/docs/concepts/retrievers/) it for use in the generation step.
|
||||
|
||||
**🤖 Agents**
|
||||
|
||||
Agents allow an LLM autonomy over how a task is accomplished. Agents make decisions about which Actions to take, then take that Action, observe the result, and repeat until the task is complete. [LangGraph](https://langchain-ai.github.io/langgraph/) makes it easy to use
|
||||
LangChain components to build both [custom](https://langchain-ai.github.io/langgraph/tutorials/)
|
||||
and [built-in](https://langchain-ai.github.io/langgraph/how-tos/create-react-agent/)
|
||||
LLM agents.
|
||||
|
||||
## 📖 Documentation
|
||||
|
||||
Please see [here](https://python.langchain.com) for full documentation, which includes:
|
||||
|
||||
- [Introduction](https://python.langchain.com/docs/introduction/): Overview of the framework and the structure of the docs.
|
||||
- [Tutorials](https://python.langchain.com/docs/tutorials/): If you're looking to build something specific or are more of a hands-on learner, check out our tutorials. This is the best place to get started.
|
||||
- [How-to guides](https://python.langchain.com/docs/how_to/): Answers to “How do I….?” type questions. These guides are goal-oriented and concrete; they're meant to help you complete a specific task.
|
||||
- [Conceptual guide](https://python.langchain.com/docs/concepts/): Conceptual explanations of the key parts of the framework.
|
||||
- [API Reference](https://python.langchain.com/api_reference/): Thorough documentation of every class and method.
|
||||
|
||||
## 🌐 Ecosystem
|
||||
|
||||
- [🦜🛠️ LangSmith](https://docs.smith.langchain.com/): Trace and evaluate your language model applications and intelligent agents to help you move from prototype to production.
|
||||
- [🦜🕸️ LangGraph](https://langchain-ai.github.io/langgraph/): Create stateful, multi-actor applications with LLMs. Integrates smoothly with LangChain, but can be used without it.
|
||||
- [🦜🕸️ LangGraph Platform](https://langchain-ai.github.io/langgraph/concepts/#langgraph-platform): Deploy LLM applications built with LangGraph into production.
|
||||
|
||||
## 💁 Contributing
|
||||
|
||||
As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.
|
||||
|
||||
For detailed information on how to contribute, see [here](https://python.langchain.com/docs/contributing/).
|
||||
|
||||
## 🌟 Contributors
|
||||
|
||||
[](https://github.com/langchain-ai/langchain/graphs/contributors)
|
||||
## Additional resources
|
||||
- [Tutorials](https://python.langchain.com/docs/tutorials/): Simple walkthroughs with
|
||||
guided examples on getting started with LangChain.
|
||||
- [How-to Guides](https://python.langchain.com/docs/how_to/): Quick, actionable code
|
||||
snippets for topics such as tool calling, RAG use cases, and more.
|
||||
- [Conceptual Guides](https://python.langchain.com/docs/concepts/): Explanations of key
|
||||
concepts behind the LangChain framework.
|
||||
- [API Reference](https://python.langchain.com/api_reference/): Detailed reference on
|
||||
navigating base packages and integrations for LangChain.
|
||||
|
||||
@@ -328,7 +328,7 @@ html[data-theme=dark] .MathJax_SVG * {
|
||||
}
|
||||
|
||||
.bd-sidebar-primary {
|
||||
width: 22%; /* Adjust this value to your preference */
|
||||
width: max-content; /* Adjust this value to your preference */
|
||||
line-height: 1.4;
|
||||
}
|
||||
|
||||
|
||||
@@ -0,0 +1 @@
|
||||
eNrtVnl0E3Ueb4GtgAesT1dQjpgVdbWTziSTs+RBD5IW0jNJL4p1MvNLM81cnaNtWrpWLD4flC0p4AG7rpTSYFsK2AJySlFXFFRA0aUooOyKBy5KQURQ9pc0peWh7/n28d66+8wfyWR+3+PzPX+feaEKIEo0z8V20JwMRIKU4R+paV5IBOUKkOT6VhbIPp5qyc5yulYpIn34AZ8sC5IlIYEQaA0vAI6gNSTPJlRgCaSPkBPgs8CAiJkWD08FemNTatQskCSiFEhqy+waNclDV5ystqjzocJ9kkr2AVUlIOCPqKI5VTIvyTw3TR2vFnkGQDFFAqK6dk68muUpwMAXpYKM4DzC0hwNpSRZBASrtngJRgLxahmwAoxEVkSoi2pQ+IbnmX7XckAIG/QqXCRQqHzl0VKj5gg2fFoK5JIoHChAAYkUaaFfRm0H8lC4GiggECLUg8mTwjYEEeZElGkQ+cfwJDFgPeoboqW5UnVtLQwP5pgWAQWhDUrCMKOSvKcMkDKUrJ1TG/IBgoIuGlt8MDvBzquTv44gSQBzAjiSp6D14NrSalqIV1HAyxAyaIMZ50AkzGCbHwABIRi6ArT2awXXE4LA0P3uE8oknuuIVggJA7n2uC1cDwSWk5OD3VkQRFJ6QnYAdgmnwjQGTIOtr0IkmaA5BlYdYQiIp1WInG8beiAQpB8aQaIdGGztV+4cKsNLwdUZBJnlvMokIZK+4GpCZA1419D3osLJNAuCoZTsa91FDwfd6TSYVmPacJVhKcCRwdWRRtp8lTKQxQBC8tBGcCXaOZAfBnClsi+4SqfVrRGBJMCeB4+1QjVZkea1wFqAfXtC0d5vzpo1UMSjMXe0pMK6BHe4fEq8CjWoMghRpUW1ehVmsOh0FhxX2TNcHSlRN64fLcMGl0hwkheWYsZA2UOkT+H8gGpL+dGC7wgXHEYThg9HCwFVAi8BJIoq2FGA5PZPPZKe2tXfXQgvlhIcXR1xG9wRqXxldVUlRSoU5auoZFFzNa6jPUAhvd1RFTgCYTcQEMJKwVW4Tt8ZPRnIfRuMFUUwFEGxrVUInFXA0CwN8xn5jq4eKdiiR1H0xWsFZN4POCkYwtHIZ+dQCRGwsGhh34NmcLPZvP3HhQZM6aCI2ajferWUBIaiwbSs9OK1AlETzajUUTUgjdBU8PA98E8JoaXMHr0eJylCZzR4dVrMRGBmwmPEUEyvA8SW8D4goZVwMQVelBEJkHDPyoHg4XiWqArPmVUHRQ0w0kS4HklGoYBT8aTy4RikRJUgAoYnqHWkFyEJ0geQ/v4LhlILM5My0lPanBBkCs/7adDUGzuupIT0lnhYK5UXKChnUhiHO60cNYvJQoEsukRjEup345WEwNJYSpqU5EzPsfMIZsQxrdFk0uIIpkE1cEoRh8dv0uWSdjTfp+NLAnaz35lRaMygdTPNmWhBmZMRGCXTpfHmutOqzclV2pmObHNWkjbXVpXkypNmZmp0eHlmJenT0kwuhuL+VNLuNThmCTnp2Q5TmQE3G4n8HFdWieyHUQtw2VoTElWwYeG+lKzRsUHg2CDhoTFa0IGhSVRRkcRYNVevyERVGry3sjgmkKhyhjMM4C/c205aBtZMngOHl8LEKBU0Zc0kst0FAb3bnWVMwryluRTvtpFKURrhChQVlZXn8YYkD12ur/R4MoZkxmjEETSaHAOKmyKtOQj9P0S1qQAZugWQrMhFBIvL8RJHe72tTiDCqQq2kQyvUHDbi6A1xYbkJhUGu80YqcMxwmMwGSicBFokGe7RAWtXdkZL+KoIEQxsvAoy2OXTWdUwlTp1ooolrCYDnLHINf5oa//F9Wrs95MXjoyJfIY3OHP4FejYP57MLzg/4d7krulVs7ue722zyssMa/fWF0t1iWTNpvTZMy6cXnJ3bBM4WDX9vdrztZXnD2++cWTsQ9NtdYfe3WBb+JUl4zS//Ejo3N6LzT98c+GdD/s+fODW+FtnT3kP7bvp3Lf1+yYW
8k+XdU9/bdkpwrvYcratj8Ial+CL7pi4f82w+SF37/wbbzcUn0H/9PGMcVM+mmpd7391fNGePY11Y5MrPjx2/g83zn20IbWLbEny0/MX35xeX5ez215XtHftqoM9zIhnmm5Zzc/ZlOyICS3dM/52T/wHj2x7PYAvHB1cN21u+iPH+iqPbT7w/pEzJ777W+ekT1NWOD4/d3DdVv8aZspdjuXHU6YmPmgbmdYrfzB6zg8N7x7yvfVpfYzefeQ5eafyd3b6/bYdx+NeuLn4wS8ulY+5r7tW+OyS9PETPS9sdE3osLxy/rHzye7eNfYa75SlxeLsk3HPf2lbbrlrQruiPGfdMnW/Td/7z8zvkz8Dd6QnjD5x8c6eC0eGf7p0lach4cTh32zcPt0eqNz8suRZeqigYd/XhQc2HS8bNmLJ65e5T5oqplmfOnR5T2MMuWbi77YYCoQKy1dPe/fzx146GNe47fGe5sPi41tO+C6xtxUczDubGPvNIQtzpmnv6EWG7bNvul+ZhCxpr+/sbJ4y4uLxMbDOly8Pj1m499ZX7h0RE3M9meGwnOvDDOOHaHIKw1w5JuAtBLchfN9PAktIgvlJJkhDHqYOC5SkzMg1lTvppMIKNG+WNz3FLpmLiCLF+HPoIiGWKixEAr2oa4qv0LpitUVVrO7HX6yuVYc53VDY6vRwuJLCcQHNYHhhzEOhl/wMjL/y5F958i+VJ+vN15kn6/+feDJu/l/hyej158l6j8ELtJQR1eEoYaSMmM6DkqhJqzNjqAkjTP81nqxPqp4VcNjQnGxthZ9zC7NYxlxt53J9AYdU5GYdgUwnmYWasllHziAb1F/hyRk+Zx5fVe0nBV7Qp4KkDGehKblcq/X4gSYnA6W5yjRCyVNm+sopv4Mql/SFvNGuoXL4alsZW00bZPsMtsgrAQOa7MqbZeRmujWKN4DZZ2T7s3MdnLmoxGPSZqQmp5vSfy5P1l8nnmxOE205zvI02sPYyqkiW7U5tyDgd3kU3FjIYeXmUszmrKCNKD4THZIZHDX+MnmyVw8oyuQ1UNeJJ68Z5MkRDtWQ28OtQm/b3nf7k2+vHPZGXBO52p1ndvdavyybX3wxllv0V/+0TYvaLl18Y/HjLnb1Q6Z928d7nz02a/e49xfw45/aoTKblhx999zbk2uwf710+vTkWxKPLdv8xZucvfHcae3J9jW7ttUsPtu2tclkGt5y4K3us6qRzf6HlgVyDilfs+o418PNHdLSJ5++pffz7j0LtG+24euyPp+0sX7X5lNZu8eRCe8YP+ke/eyFJ0F7PlU3NvWGuTWTPA01+G8/Uo+YmFbwcfv8pb5Rp277Pn/MqVEjzh1/ZtOpuOErgWU9WlTz4lZXwPeayiMm3intbjw5Li7x5NeaP3+rO1rWnnKsamwsewmo5LmexlHkI6GtJ1bEfRCYMEFfP+bhVYR7X8XcwjPJXOaEu0d17VJ2frerI2d68gphA6KZfyoJeQNs9LjuTHjiH81nLsZPDry9ceOXOz0vjc2d9/vsRX3Pdc89Ujm+b2zCV10b+haEdk2tuxyagrf0vFaavX/BgX2VPS/fPfamjHb2hqMB62T7F8uZOXPyPdJf1gZExw/D+yntPW9mrnwAPv8bzps/8g==
|
||||
@@ -0,0 +1 @@
|
||||
eNqdVXtwVNUZ3220OjKDBRGwHe12BxBr7u597TOzbZNNWJIQNskueVSZ9Oy5Z3dvcu89l/vY7C4CFTs6KOJcH6NC21ESdp1tComkKAgqdup0QFQmOhpbqrYVaXV0aJyxiiU9u9mUZOCv3j9277nf6/d9v+/7zvZiBmm6iBX7iKgYSAPQIAfd2l7U0CYT6cYvCjIy0lgYbo/G4kOmJk7+MG0Yqh50u4EqurCKFCC6IJbdGcYN08Bwk3dVQhU3wwks5CZTm50y0nWQQrozeMdmJ8QkkmI4g861SJKws9apYQmRo6kjzbllY61TxgKSyIeUalA8pmRREYmWbmgIyM5gEkg6qnUaSFYJYMPUiC3torcU0wgIJJuHhtNYN6z98/EdABAi4g8pEAuikrJ+m8qLaq1DQEkJGKhEUCmokr1VGkBIpYAkZlBhxsoaBaoqiRCU5e5+HSsj1SwoI6eiy8Wlci4USVkxrPEoAVHf7G7PkUIqDsblZVzMaJbSDSAqEqkMJQGCp6BW5C/MFagADhAnVJUkqzBjvH+uDtatfW0ARmPzXAINpq19QJO9/MG53zVTMUQZWcVw++XhqsJL4TgXw7r8Y/Mc6zkFWvsqJDw3zxgZWo6CmPiwnqb3z9ZHQkrKSFtDDM09oyFdJW2B7ikQM8PUtw8TLtBrfyxW+2NvtHWWxL/Ylg03El6sY/G0WeugvY42oDlYmvU4GG+Q44I854i0xUfC1TDxK9IwFteAoicJFU2ztBdh2lQGkFAKX5HwY2XCSTZl+KQtKZRVsY6oKiprpIfqnBkMqrnx4Ex3UVhLAUXMV8JaxyrMD+azgwI0BSGdGZTpQJ7nxAQyYXK8aqJquByGAKJk3Rpiae/+qmS29iWSK00xNEUzR7IU6XMkibJI6ln5rU6nbg17aJp+/nIFAw8gMsdFnq48L87V0JBMSCvHvuSGDwQCR6+sNOuKIyoB/3w0hFE0Fw3DyvrzlytUXeyl9ZHsrDYlCtbkCnLoYz0AIS/rpZlkgKYTwMvxnM+T8EGO9UEv7z1MJl+ExEuZTBVrBqUjSFaRkbMma2WQLc9ZiGM8nJdkWucQFSiZAoqZiUZczkGvc6gakjAQDsAkBQFMI2qm/6xiY+/6+rbmcClGQIYxHhDRw+/Zl/f1wWRfQg7lUNwTiAs+RsFrNrV0xF2NmmfA71Kjme56LHd3x2TI5tRIjO1rohgfz7A+v5/lKMZFu8iUUk39PIwMCp2eVj7CmevyeqSzKSA2RrubeiMZF611NRiDjJoRcS4Qz/Zgo9/or491d3DqYKx9TS6Xp1vCrfGe5oS/q1Py+dZJ0J/IJnztZnN8fdf6dfl6r9ri6W/pEPPJNpIiMNIhd52DNKxIih6qjg1FxoYqD40vSM8OTZ1DqBQm5Jq/Iusca8lqjypSrs4RK1cYkX8go5hooNB6rKDJR0lhzIwohMxNG3LxCNfLh1k6BnSlK94Ge/yg2aer9AbIrkl1cJGGgWRLfBDPqQzrZSi6WhwvzfsrrXkJ+v+J6lAPNXcLUFF15g4rKlhXxGSyEEMamSqrBCVsCmTba6gQXkN11vda4wEGcjwDOC7hY3mB4akGskdnvf1vZwyXr4oikEjjZaB1MM2FnEGe55x1DhmE/F4yY5Wb7u5CuVGV1B/sQ99/4Fpb5anZ2Xl81wS95OjHt9/yI9/q5pOrvtg+fk18/OSOBYsbLPjSiuTNPz6ZWXqiNL3rl+Ntpat3tC7k8Crujc9X2n/W8myIPj3l+PZvJi6Uvv7Xafma9z960K9O//uFc3vOfvj0ha+/2vh7b0f9qeuXfLg3f3t69Lv3OLmJV35w/fKNU9rjy9CGG/+0Y/navbueff1eI/KPp3a2PDDy7r7eJ8++3XvmkcXnJm95caXN5p7wneUXbxsLCXsWsE99eTR86KWFdjoSdO7cfOCuD1aNnmq4jrWfWvKfn/904nv+N6Pf+fPLtz5xtTq86FvNW78ZXRqUIm/ALNe/5dXo
Oyt2Bz955nzq4v3PnVj9/jsLD996w/Q/a3q9Y8WrPrjwVvbobcdHho7fG3zzht3pnlOK1Dv1ykcfo9Gf3P3JjVsXnXni7cU13e8dWPrqVctCJ5oOS48/1Hn6/rVdj/Xd99WeL++848nPtn26rOHacTuSb25ZqSxww4V/W3Rx4NMjf72Nnjr368deDq42zt+04tBdb02J9LYv7Pcd2VAIp361+/Oh35mZbqX14tY9Z8b+/ug3dptterrG9hlmzlM1Ntt/AUZXjK8=
|
||||
@@ -1 +1 @@
|
||||
eNptU39QFGUYPhQbUhmIGZv6R3ZupBqHvdtjr+OApomOEEQ6hBsGRhM/dj9ul9vbXXe/xQO0ApnUKZlZSy2omYjjzllJpSgkMSMpayAHGkeHqRwrM5pKBRRicqDvTjAZ3b++fX887/s+z/s2hmugovKSGNPJiwgqgEH4R9UbwwrcpkEVNYX8EHESGyxyl3jaNYUfTeEQktVMqxXIvAWIiFMkmWcsjOS31tisfqiqwAvVYKXE1o4eqDf7QaACST4oquZMwkal2VMJ80IQtmyqNyuSAPHLrKlQMWMvI+FORBQxeaAgEH5IAKIaQxCgUtIQUQmBopp3vhQBklgoRAIZAWgsJGmSA7xPI9NwHYqm0iNwCPplPBjSlEgVykLtDHMQsHjsS6bEICepSO+6b5RjgGGgjEgoMhLLi179Q28dL6cSLKwSAIIG7lGEUa50wwehTAKBr4GhO1n6cSDLAs+AiN9arUpi5/xMJKqV4f1uIzI6iRkRkd6TvdCHtagWMy/ilmmHhToeIFUEeFHA3JECwC2F5Kj/5L0OGTA+jEPOq6qH7iQfvTdGUvWOQsC4SxZBAoXh9A6g+B32j++1K5qIeD/Uw66i+8vNO/8vR1tsNkt61yJgtVZk9I4qIKiw6y7Jd1MMrBVNUg6SsvUsgoZIqSUZCVfQ26ijCwQKUPQiTm+32TMOK1CV8bLCXSGchjS1MYjFgkPfhOf36wN3wYLUbwRzsGz6KQ+npRK2dMLNICKyJITNnkmnZdopYl2hp9M1X8TzQJW6PAoQ1Sqs1AsLWxFmOE30QdZwPXAfjPkjInlW78PvCspGF+Ruc2nFG9VAYVFengLp9EKQzX8SIBlB0lgS4QuEZHTYANJHiUo7nWG3MU4nTdMOSDkhpKg0J2ScjN3BVjrp9hoe6IbNYiO8kuQV4DFXLukCDAfJkiglejin/MXswnxXZxlZLFVKSCU9wKsHRUmEoRKoYBV0I1oa77UCQzi9OLtc73aydqqqislgYYbdARhAPo/XZYGeu+MHI0cRvfQGLIGCTV/FbE9+Pc4U/ZZu2Fjh++G5lXNrW7ZM/NLU/XDs7eKxM3knv9/t79gFm1rdzdKzBefyD51fMXPtzLWH6pNuz64fa+xn9r63emqw9eKtyetTJ2Zvjxe03hz/Z+I7/+eXXvlyiX+M9+hGVuKjb69pfrUkvro5fsVfAyUDSUfGPgrPVF86MXx15u+JmlObyfSyrSnCwGu/TQ+ezmnvGcnal9TzFhe6fPnJ3tjTwZTDKf1E74/jzRWtMX03hp7Y8vMzhscovb717LSntzP2wrBpX9ZI/fpCNjl3uvQcvcz37k/9S8uWX+mPXftHnJLw+9Z453Dir6adVXE79geW7whIm74orTMm8xL2tx0cWt3YQO+5cv79FSNnYzYfevoGiHGzw490p3rjbnEttXvaOro+fXy7mnqhqGX51QtF617+rK7hYvIyI/+0+4rnsYI1U/WDe8etbWhoYBX4NuPrJZM335lobPhXqH/zYFlXX/mqp0ZndzuO/HkrATM8N7fURLZuWDkdYzL9Byv+bUg=
|
||||
eNptVGtsFFUUXh6RthaiwUQMaCcbEB+925ndZemuMXRtS+1KH7RLbQHFu3fu7kx3dmaYudP0AYlUIU2BkMFQAsEX3e7i2lKaklSiqDxtgKRAiVqJVQMiVqBi8AHa1NvSAg3Mr5l7zvm+c77v3KmPV2FNFxV5UqsoE6xBROiHbtbHNbzawDp5OxbBRFD4aHFRqb/Z0MS+eQIhqu7JyICqaIMyETRFFZENKZGMKi4jgnUdhrAeDSh8Td/GOmsEVq8iShjLutXDsXZnunU8xepZUWfVFAlbPVZDx5o13YoU2oRM6IEfSxITwQxkKmkxAwOKQZgAhppuXfsaxVB4LNE0JEGDx8ABBCiGDWCnBKyDXUihCI6odB5iaBSftbFr4wKGPB223/JoVFB0YnbcN0A7RAirBGAZKbwoh8y2UK2opjM8DkqQ4ARtT8ajCpmJMMYqgJJYhTurgU6gKEt0LkDECKatmh8VFvlX5eWX5RbGboOa+6CqSiKCI+UZlboit45NC0iNiu8PJ0Y0AVQomZhd3vE2M4prqB0yw9qcbhu7715qCdKOY+po/NN7AypEYYoDxqw2Y7eL996bo+hmSwFERaUTIKGGBLMFahGXc8KUmiGPDGrGs4vvpxsL3qVz2Di7LbNjArBeIyOzJQglHXfc8eBOSYIa6QCsC7Bc1wRoTLQagBTKYH7I7h0XUMJyiAhmM+dw79GwrtINxm/FaBkx9Poo9RKf6o6Prd3uolfGN2FTNIe6ah70C0Y6w7qYAqgxlHgBw7k8DodnAcvkFfhbs8dI/A90qcOvQVkPUqdyx5cmjgRDDmM+kf3AdUmM3Swg8uZn9H0Vy+VFjBLVtzQYUivD4mpfdkmuXIjzDtzVRdFCUBZrR2lH6vrmOtwuxwLeEQA4EOSB0525ELjddg4E7PZM3pnJLXTyruYqEZoJzsYxIUUJSbgdBQGCSMDgtjRmPKei0FuQn91aDkqUgEJ04IchMyorMo6VYo26YSaQpBg8XX8Nx7IXgxJvhbnfzSGHk0OIDbpYJ0J28BJdm3GZ7sgQHbk7o7+BddQKjR4dm9SYtjHJMvpM4U3vlt6s1PXDjddbtu/3dTX4DrRmzWl882RK8qTcM+wFsmHJFfaCmLxs+AVf07asXek363p6mtpnzXFlXixdoaRdORL+/otzXWn/3fht6Je2J6/GH59Ztqf35c5GzTX/cMo7Rw9AbUbF9OfLMhuaG6Z/JzQ2/ylfLbh88tbez+teHxx4sUxYv62846a2dueeG/pftpZDU+Ytx+zvm5fUD56dSba+sfXsnJvdaUNTC6tbHu5RdyyrkN4f/Km7vXvn1uRzvVP7d/262pcX7Qn+7bV9nDqwuf/I7NOPda9LKX8i6UTS9g+eSr34yIWpCCataaqesaZ2yfkvvYsHTmya/+zpfDjNe+iZHHsFTDpzLPlUsfZj70P95uGs050rGdIp/sxseO9S+aLBYPm50uWTvx4Qma6VBw+d/yal88S3T9srZs2eW9XWdPSfafytpQ0AHpl3POXkHzsuNa77anebr2hXR9+15/Yfr62MWJu9nwy9Wrzoes7xoZ7o5R/eHbpW9e90i2V4eIrF9ARTHZMtlv8BHmuAkg==
|
||||
@@ -1 +1 @@
|
||||
eNqdVW1sG0UadluuQoJDOqVAdRLqYsEJQnez6931V3Cp4zjNR12ncUJT0J1vvDu2N96v7sw6cXL9cYWrQNypXVqoysePUscuJm2aNrQcbenpuCvtFXEH3J0IEhVV+VFAgBAoIEGBWce5Jmp/3f7weGbeeT+e531mtlWL0EKKoS+ZUHQMLSBhMkHOtqoFt9gQ4UcrGsR5Qy73JlP9+21LmWnOY2yicEsLMBXGMKEOFEYytJYi1yLlAW4h/00V1t2UM4Zcmtk55tUgQiAHkTdMPTzmlQwSSsdk4u2HqkppkALUkFEgQ8awMZWBwELe1ZTXMlToWtkIWt6tvyYrmiFD1V3KmZjmGZHGtpUxXFudrHJkRNiCQCOTLFARJAsYaiYpjBi6vlgmsLWah0AmZe8o5w2EnUOLC5kEkgSJd6hLhqzoOedgblQxV1MyzKoAwxrJXod1mJxaAUKTBqpShJW5U85hYJqqIgF3v2UIGfpEo1oal0x47XbNrY0m2OjYmU6SJKJdLb0lgrhOcYwQZNjDIzTCQNFVAiGtApJPxazvn1i4YQKpQJzQDTadytzhQwttDOSMJ4CUTC1yCSwp74wDS/MLRxeuW7aOFQ061VjvteEam1fD8QzHMYGpRY5RSZec8ToNxxcdhtgq0ZJBfDj72EPz+KhQz+G8s58ThAMWRCbpH/hIhRzDNtpWJlzAN89WG430QrJnnsQLntvL7YQX51R/3l5NcQEqKWHKx/oEihPCvC/MB6l1if6JWCNM/3VpmOq3gI6yhIr4PO1VKW/rBSjXYtcl/JRLOKnGTZ/0KQ1HTANBupGVMzFI980piO5qPzrXXbRh5YCujNbDOqfqzA+PjgzLki3L+eKwxoZGBV7JQFvKTjeOmJbhhiEJ0Rpy9vt57lBjZx77GqmVpTmWZrlXR2jS6FBVNIXgWf9tyBg5ZZFl2VeuNcBEd0TwVYGtf68ttLCgRkhzY191I4RCoZPXN5p3xROTUEB8dbEVgguz4XwaeuVag4aLF1g0MTJvTSuyM3MXmaSDok/MhPggHwqwPlEUWAGEIBviQhzL+WR/6M9E/IpEvLhkmoaFaQQlcmfhkjOzWgMjrs4iPCfyflJpK6XokmrLMGVn2g23BtRKmRZUDSBPxjroGJDykE7V+8+ptm/eEE10xWopkmTMMAoKfPL9JcvSaSmbzmiRbpvBw1GspyxN8aVD6YQh2pIciOfibahL797UWSxuiveYWxL2MM0FfCEuIIpikOYYluEYjjZ4vbetLbNe7tR7Ep1tfWzRH+gG4kDM6var6eSD3XhTVttUjJo+f1f/6IC4rs0uDPWW1qc5ExaVUDZbTG5WChrGMcvkt8QHbMbfNxQl1QCcj7S0UqQ3FYJvpKEQmiiEdvUhhtl5fbRSch2DCLP4NmylOsl1n9TVUiuVcsGEZAQaTCkYRjYYOpzZTTCwi4ocCchxkOhghmzWig32GcxwMj2QeyjR548XgqZtGsPRQk6IM2aiJ7cABJHjabaBg58VgvUuvJr6/5nVsUF6oeDppDn3rlV1A+lKNltJQYsIyKlJqmHL5GK3YIVw3hfd7EwHZYHNZiWBC/plnvVxdBu5Mue9/e96KLuvQhWopMeKknM0z0e8YUHgva2UBiJBP5FT/fX7fcXtST339yVo1RM3eurfMnVjIrmUW3Hyi8mR2Zu3/yxw4+37f3XXL/WO9Sti3d9MPXx59j+zB52VP5z4+MjvVp76mn1371vrTzd59iFu+b7uczWrMvZd8MrIoFGrHrxjw8Y1H1xUhbGts98cPffb5vDG6Hmxec0B5taBt6jCkbGHSrv/+LZ4x+y6j9nzRz7dfuCedU3wD5c+/9D3qfXZnn9/Nn5lYuDp8q3Lue3Hb/B8eCIkdTx7ac/K2CRa23xz9LZ7zrzR4/nrrscf23nn5b+8c+Tyc53xp9/2T6X/QX90Q8/uNWvvfeAX3S9euW1F1y0B1Hp81/uPfPW3A5nJJ9CqHS+Fz773
+n/PTH/7+co3ey/eT8fOr/ryqWPh3Tft8O38051LdzX9/ESP58KzSz5JP//oV/2h37Davzpex4ePN1XP/fPs9N17B59punD6k+8vHTvrjPsvqmv3lZu5B+4dO7f55dPGD7mpyar25Rt3K8+3v7f8zNDUO8fWfnFSf62819rzPUH3xx+XeW46fd+7zlKP5yeHyHrs
|
||||
eNqdVX1sE+cdDnSsdNqg7WhHitrerFYgyDl3vvP5S+4WO1kwIbWTeCFmouH13Wv74rt7L/fh2CaglbVoKF3bK6hp1Q/RJNhtloQAUUrDR2Gj2ibaIjZ1ItCVdm01NqXrumkIqSvda8cZieCvWbLPd+/v83l+z+92FjNQ00WkLBoRFQNqgDfwjW7tLGqw24S68VhBhkYKCUORcFt00NTE6bUpw1B1b20tUEU7UqECRDuP5NoMXcungFGL/6sSLIcZiiMhNy1ts8lQ10ES6jbvT7bZeIQzKYbNa4tCSSJkSACiC6XxJY5Mg4hDoOm2GpuGJIhtTB1qtu1bamwyEqCEHyRVg2TsTtIwtTjCdrqhQSDbvAkg6XB7MQWBgFt6aiiFdMMaW1jkAcDzEPtDhUeCqCSt0WReVGsIASYkYMBhXJoCyxBYw2kIVRJIYgYWZr2scaCqksiD0nltl46UkUorpJFT4Y3Hw6XaSdy3YlgTYVxEXag2ksNoKgRt52g7PZ4ldQOIioThISWA6ymo5fOj8w9UwKdxELLClFWYdR6bb4N0a38z4MNtC0ICjU9Z+4Emc+zh+c81UzFEGVrFYOTGdJXD6+kYO+2wuw8uCKznFN7aX4b89QXO0NByJI9wDOsVamwOHwkqSSNlDdKU+1UN6iqeDfizAnYzTH3nEOYCvv3bYmVIBsJNcyR+UPW9oXrMi3U8mjJrCIojmoFGOCiHk6A5L8N4WQ/R2BwdCVbSRG9Kw8GoBhQ9galomKO9yKdMJQ2F4eBNCT9eIhx3UyofjyEJsyrSIVmpyhrpIFtn1UGG6g/PTheJtCRQxHw5rXW8zHxPPtsj8KYgpDI9MuXJs4wYhyafmKi4qBoqpcEFkbJuDTI0O1Y5mcN+GPdKkTRFUvRUltQwFJIoixjP8m9Foro15KQo6siNBgZWFRZzkaXKnxPzLTQoY9JKua+HYT0ez7GbG82FYrCJx8VNLbTS4fxqaIesH7nRoBJigNJHsnPWpChY0w/gm06WYwDnctJ0nIlzgHYybifPO+Jx2gGcXMLlfAPrXORxlBKZKtIMUoc83kdGzpqukUG2pDM/g/043KmPEBVeMgXYZsbrUakH3UeoGpQQEA7wCZIHfAqSs/NnFetjD9c1h4LDbbjIIEJpET5zYdHKzk4+0RmX/XqCozktHNmoC4w9vSnTwWyoz6wPqmZrY0NQkNrzrhTNBc0mj9xD0i6WdrjcboeHpO2UHauUpELOphArpSI9kiMUYlvr8jFRocS0XbBvyCWNpBM1mNFQw8b2bgwzn3C2bwyCSDYaVpxNdTnUGg8505nG9nadk9nopqA7nd0cyNY1uzqCsXSek1uEaCJmpDwUZW+I4RaBkfLX+gg8sCIG3V+RDYllQ5ZE4/JSc6LxEUIZGL994Yr0Eevxfg8rUs5HtJUQhvgKZNgmGtD/MFLg9F4MjJkRBb87Bto7pM0bOsXubn0zciZBc93miJnPIEHgskoItCQ6+fWmqnW75yHD0k6SqoDDUay7PJrXS/8/q5rsIOdvATKszr7IigrSFTGRKLRBDavKGuYlZAp422uwEPwR2VoXsyY8NM+wNO9h3dDjTiQ8ZADv0blo/9sZQ6VXRRFIePAyvHU4xfhtXpZlbD5CBn43hzVWft09WigNqpJ8a9Gx+/uWVpU/t+Dv118/0XpKuUh9+/hf1rlf2jOQufv0O/CHj27Z9TK3Ynp82a6ac5P3xO6sz8daPrl0q0eZ2bpqvG9Zb2/4c6t3S9XSwbeWPdb14C/t59/b3tKbi5y9OPjum5/9AHoe6n3yhQPTH3187WB2xZnT/3iPWbuD/9OaS0v6pGO+X68dEDYOT1/Z/izqs7256qfvpid2//Ebj4wibt3f+U83vR47Uf38qOtboZkPv1xc9aGZ3dNY/KL//KnP/jn6++9bUmTG3lxly7+wNnBndfyvHTWrI7nfXP7mxS+vLJk4d5IMPO4I3NVy377lKvt+IL30nu/K937H
+9Wil9/fc7TxwW2BfXd/tOa2fP/PDx26qlwLnZ5a/Xxg6oMLB3dMHZk8cdu5Teyf+/Pyi69MPrHmd0c/efr8rfS+4pKTTwHvVz/eesfA54v7Z5Zffmbr5ENndov3M6pv50zswmsn7ludH23vEsSadwrr9kbJv+32jAauXLhaPf4cvPaHty9NnH3jF+lrjTsW7/3VyVfP3N6yyrw329X33Ni/O+X+6uW7loD/iCuelaiCPzUC0dmeBy4/vvLoIW7s1MqB6ifF8/8Sv1hVouiWqgF09VwT5uu/A4Sdew==
|
||||
@@ -1 +1 @@
|
||||
eNptVWtsFFUULhIU/CMiKlEC40JCgM50Zmf21Vqx7FJobNnSXWiXh5u7d+52pzuvzmPZLaKhEklEsKPGxII8t7ultuVRVARKAgmCkYAGDFlMIEGMKCKJ0SiBgHe3W2kD82N37j3nfuc753z3THs2gTRdUOQxvYJsIA1AAy90qz2roVYT6cb6jISMmMKn6/2B4G5TE3JzYoah6uVlZUAVKEVFMhAoqEhlCaYMxoBRht9VERVg0hGFT+XeXWOTkK6DZqTbyokVa2xQwaFkAy9sqgDjBCA0IPOKRMimFEGarZSwaYqI8nZTx+u1q/COpPBIzG81qwbJUg7SMLWIkveV8S6D/3VDQ0DCiygQdYQ3DCSpOCXsmMeiKc/abAwBHid8uWRiOqbohtU/Oom9AEKE8ZEMFV6Qm62+5jZBLSV4FBWBgXowcxkVSmT1xBFSSSAKCZQZOmXtA6oqChDk7WUtuiL3FjMljZSKHjb35LMjcV1kwzroxySqasrqU7jaMsFQnJui9yVJ3QCCLOLykSLAfDJqwX5kpEEFMI5ByGInrczQ4f6RPopuddUB6A+MggQajFldQJOc3MDIfc2UDUFCVtZb/3C4ovFBOJZiGMq1fxSwnpKh1VVoxJejDiNDS5FQwRjWTjoDFSUuICv3ZzgMo+GIVEkvCqyGSbfaEoQN1aK5mIm0LnC5mpY6fLpz4aKGUFgPaouESFiohiTjsnsYl8PhYEmGoimGYkifSYVbW5OaZ+GS1jgdFms4KVXbxFeFFtY7l4nVcktESbqF+fZQi1iPElEoBhtbwi2IYpUmuQ60ur1L6huqG+mQ7EkmI0Ba4vVG/WJVBYHZmQmBr+SWsUq9BFoT82vbgn7GHpcSTVRYEhvtXkOJL/fVsgbV5mUXBVtCI+jRmCFdZOikOTedf/qHtSEiudmIWbsZ1t2tIV3F9wa9ncElM0y9PY11iM6czhYv0C7/aw8k/FzahzVpDQZjZinBuAg/NAg7becIhitn7eUsSyysC/Z6i2GCj5Tg/iC+enoUy3DBsOSzMGbKccT3eB8p9sG82HEn8/TxLSVRUlV0RBZZWb1NZMPQ5CBrfANDN4tUtGYgC22FsNZgQfWr25KreWjyfCyxWqI9bRwrRJAJoweLR1RNyYfBhEhJt3Y7HPb+omVYdz04V5pkaJJmDidJfM2RKEgCrmfhtzi+dCvtwMU+9LAD7hfCgy7LFbpBHxvpoSEJCzYf+wEM5/F4jj7aaRiKxS4el+vwaC8djWTD2CX90MMORYhdtN6bHPYmBd7KzcSLsJPnaIhQBEVRlHaxHIMflkEOYI9GgRugr/DoEyBGyTdTVTSD1BHEs9pIWblSCSTzM6aSZRysE2daQQgyFE0eBcyIT8nnoFcQqoZEBfB7vdWkF8AYIgMF/VlZX2hxVV2N94smcqSQSL869J3IyoouC9FoJoA03BirB4qKyeNhqaEMxmqoClkH3Zg95umMuu2Ig9BJzsdjaBjtf9ml85M2C0TMPQGtgRhbaSvnONZWQUig0u3EbSp8TdZl8rnKzSfHLJm+cXxJ4RkrdhyXT9ATfbduPxk/N8G2Y7L14qSWcX1t3RcCSxdsHphJbb55aUvPs1evzN0wueH4yQ2pbblcpe+9db1EM/HC4l2fzwlJ21+5+OOGU0fu/PLzxUtKmB/M/hDf8nFiYJ3bXXt7Xfu9lc+A5Z91pL+fVTpVO9HRSUW/NSyUO/pE3576cSuk2Z/4D6wvb+zkO88eOJ6bMvs779nLtq+nVPylXO+ee/edrXU/zetacCu1qWP7PObxs907Sv55ve1qJ3HsVcDt/COe3cioh268/NRJI3thx/RzH360p4/tmv5v6EoNuTbgvnb5esdv3zRd/5WJ/D54pftO46Retq+r/cw0c3zt+cDUlbK7fewM78pN10JvzOjt7jgUfNOYOHEl17Rq6/nYwdzfp1c8nb2Z7t/bsOulCXffnyVu/PSCo/wG/9bVex+cuv58Scn9
+2NL7m5bOg08VlLyH7A2NRs=
|
||||
eNptVXlsFGUUb8FYg4RUg2iIqduVxAQ725md2ZNU6LW1Qnd7LJTSYPn2m293pztX59ijFdGCRCIIg3IJVqBlF2stYhsuqTFeoCiJHNESQvxDMFYTbqMkgN9ut9IGJtnNzLz3/d7vvfd7bzpTUaSonCTm9nGihhQANfygGp0pBbXpSNVWJQWkhSW2p9bX4O/WFW54dljTZNVdXAxkziLJSAScBUpCcZQqhmGgFeN7mUcZmJ6AxCaG4x1mAakqCCHV7G7uMEMJRxI1s9ssczBiAiYFiKwkmERdCCDFXGRWJB5hq67ip+VLi8yCxCIevwjJGkFbbISmKwEJ+6magoBgdgcBr6Iis4YEGWeArfg0aXEtT4URYHF6F3Lye8KSqhn9EynvAxAijIlEKLGcGDI+DrVzcpGJRUEeaKgXExVRpiBGbwQhmQA8F0XJ0VPGJ0CWeQ6CtL24VZXEvmxihJaQ0f3m3nQ+BK6CqBmDPkyitLq4NoFrK5ooi52yUJ/ECVUDnMjjYhE8wHyScsb+2XiDDGAEgxDZvhnJ0cP9430k1dhTA6CvYQIkUGDY2AMUwc4MjH+v6KLGCchIldfeHy5rvBeOtlBWi3P/BGA1IUJjT6YNByccRpqSIKCEMYxdZBJKUoRDxrncvJYWGGwJCCWJKqUxHvR4xPkeuSHKxSo4ts7ZtiDm9NQtbK3iWgKKpC9ssQVtchNBORjK6nA6rTaCspAWnDNRHobWhL4gZqulyqpF2MjRkTaucYE34q22+ivraOgi+RoZvORzufyNXrHyRVQpu1wKFKOeejVQB1B9VX29FtNL6/yoIVIdb5VsdY3WJaXQWeubXweiMsW3Ly5nHKxVnWPClPUox5Z4I1RFqz8QtdlfXBimvazPW9nkrWTb2LL6Jb4oYnxtLQ0tiwOLrNVwHGcnZSfILG07yTjJ9NU/phgeiSEtbHRTVnKvglQZzw5amcSF1HS1swerE/1wPJUdot2++feEPaOnAivVGPKH9SITaTfVAMVkJa02E2V307SbsZuqavx95dkw/gcKc78fD6AaxOKsHBuEFAzrYgSxveUPHIGh9Ajg/qbp42ElUFyWVERkWRl9i4n60e1BVFcMjM4bISkhIHLtmbDGUGYWYu3xGAt1lg1HYwLpamdoLoB0GBzMHpEVKR0GEyIE1ei2knR/1jKmxl6cK0lQJEFSR+IEnn3EcwKH65n5z64w1eix4WIfut9BkyIIL7sUk+kG+fl4DwUJWMbp2PdgGJfLdfTBTmNQNHZxORxHJnqpaDwbyiqoh+53yELsJtW++Jg3wbHG8Cz80GIjaZvL6bJbqQAJSARJu9NJIRsZDFIOysY4DuNtyEGMkm6mLCkaoSKI97WWMIaLBBBPb54SmrLRdpzpHBMnQl5nUYMeqJDSOWCBywriJcDug0ECAhhGxKj+jFRFk7e0prr8wGJivJAInzz6rUiJkipywWCyASm4MUYv5CWdxStUQclyD1Ff2mQMuihIM1TA5gQuxslSFFGGl9MY2v+y60nv3xTgMfcoNAbCdInZzTC0eY5JACVOO25T5ovyejKdqxj6Jnf1M289kpO5JuPf3btrN/4ofknmr7qcmHIiNG/Wa28qked7u5o/HFnfe/yX7V+f2U90nO7Mv/yya97j/tk3N3716px3hpdefrZs73Tm2XcbpyaEs//Yaz4oODBw+edke9eFfaeOne0/44hdTAKpWJs+PTiU+3bo7Krt52bP4248puZNLTz07YnEjqLTTNPgTvsk/4ZNM44NfBdYu655+1XGO+M5hGbl+ehrBY+VHf/89uq1S77YHGTd7sbWK12b5g30r8kfWTvlie+PbinY5nGPFDpmlh7c//uJQrmW6Xpj2ZG6Ef9cbUV9cvBk3+01sZtlrX98dD73uievcf23pzYXmMteOLfmvbc2nHE3NP/w0yud01a3vTLYfXtax9YdT81kW3duW9e/9eKkDdUX8x7dtePqlIp9fx08NrPwx5yK
lcaK0OFLy59uHrk29++SX58u6jz+8eTCfy8eXb595ZUv7rT+uej8w8mC81t+29D90JMnTTuXrXN3dd16v2rv9bKRx3fonw6t6LhUfmNyutiTc26dzvfcmZST8x/1AE4b
|
||||
@@ -1 +1 @@
|
||||
eNptVXtsE3UcH8MgUUEJCkQTvBTlIbvurr2+NhfoujoK6zbWMcoIlOvdr71b77V7dO0Ir/GcEOBEIT54CKMlzRiMbTwm4x/kFQyJgpgJ6CAxPkZMQBNBjPhr18kWuD96/X2/39/n+/p8v9eUjAJZYUVhRCsrqEAmKRUeFL0pKYN6DSjqugQPVEakWyorfNUHNJntfYdRVUkpyM8nJdYoSkAgWSMl8vlRPJ9iSDUf/pc4kIFpCYp0vHf7cgMPFIUMA8VQgCxebqBE6EpQ4cFQDTgO4QFCInViBL6CoqYiQUDKiiEPMcgiB9JWmgJkw4olUMKLNODSorCkomajBVU1OSimbQUoxeFbUWVA8vAQIjkFQIEKeAkmBg3TWJjRtiLJAJKGaf+Q80oLIyqq3jY8lSMkRQGIDwRKpFkhrB8ON7JSHkKDEEeqIAXjF0CmUHoqAoCEkhwbBYmBW/pRUpI4liLT+vw6RRRas/mialwCT6tT6exQWB1B1TsrYBBOT35lHNZcQHAjYTdiR2OoopKswMEiohwJ40lIGf0XQxUSSUUgCJrtp54YuNw21EZU9INekqrwDYMkZYrRD5IybyU6hsplTVBZHuhJV+XT7rLKJ+7MRhw32tqHAStxgdIPZhpxYthloMpxlBIhhv45lqBEMcICvfd+IECFAkG+qDwQrGFrg3MCxVUYUeOiTcAPfJgvEvVWe+qKrQsX0VUxu8BFF5U7UdxmcuA2i8XqQHEjZsSNOIo5Of8cY71pzkJ/CRVjmGrVNN8bwGzEArbGBVSV9ou1NQruqdHCc/C4sQEPlS1g3FXOuFaplXKlbndFsadeNhbzrgYz4W60+nhigRguRGB0WpSli2qpWqu7qsEi1FviXo32WZ0RWTLFJDzgsTJOsnw+VQYwvMFmLhkantliQrFshFaMsGPpp22QGxwQwiqjH8AJ4pAMFAlOD1ibgCVTNaWpBfIQfHUxmR2j/RXznlB4QksJ5KTeU81oeQhuQyooFTFhJgLBiQKzqcCCI6Xe6lZX1k31MynYXi2TghKCNHQPUj5JMZoQAXTK9Uyy96TJDjuZDh9OKQpikqgANBuV3upHqwb2B+op6RiYLFSUw6TANmbc6j0Z1jc0xhpoSqNpJtrAY45GwswGgUaFOrNXJFlMu4EBobyiH7BhjrasZpB3KZgrhuIYiuHdMRSOOeBYnoX1zPxml5iit1hgsU8+baDCrQPXXZLIdAM7M9RCBjwkbNr3ExjC4XCcfrbRIJQZmjhslu7hVgoYGg1u4pWTTxtkIfZjSmts0Bplab33LXgI0LagzRHEYeEpm4V2OIAdd9B0KEiE7BhuxSyn4OpjKYiSbqYkyiqqAApubDWu9+bxZCy9Y4rMuMVshZkWIqxAcRoNfFqwREznoBQikgw4kaSPuN5DXSTFANSX4Z+eLIGT5vW4jvvRoURCK6SBr0VSEBWBDYUSPiDDxugpihM1Gi5LGSQgVpVzkd5ppwkcw3AToIHDHgI0WgzX0CDa/7RrSW/aJMnB2KOU3sGYiwwFBGE2FCI8WWS3wjZlvilrEulchfC5EVvf3Dw6J/OM5OY7hRvYK6f7X1u8VG4eeWlD3Ze7kJ5XR8vexftmvPR+7eYtM4zz2vpeWPN46faavOcvrd/48e7bV+5GRiBlM63IhztbG8WJf728bdXeVOnf3YF/7ScDroed/an7hdf767fdKZhxKO48J6becPecuLjhZnPNiLcp+fD1yzfqlnzXbxpf1MXsaP3jtP/WtRcnF7s2H6u9vqNqx/xJ50atnJiTc+Sht7B7wuPmMfTeLX9OOnC3krviyjFs927ajn9y8Zv2k3s87q1f/9S16rdxtVdzI+tGgdVT990823d7mT/s33H23tg9tzoebZg7dpxzyvTXuf62By1zlXurL46lnvv1/J7claf+MfcJm0wgurZr/Mp3Nx699nC297MH5jPmKyU9Y6ZPOdUp0fcNHRdy+yqMkdiuwIPZPT+jVycv
MbXWTCvE94an7vLpTc0Hd9/79tNfdt847t55/vzmY+s/mEsVzrx8x/D9hVmzOuznzG2P+qrKyg1b9fYff9/Sjz1Ydv+h/NGFO7Ng4R8/Hpkzratb78nNyfkPIFJRJQ==
|
||||
eNptVXlsFGUULxCPqFFT0UQ82G40IdKZndnZs4e17LaldEuPXbSFkPrtN9/sTHeuzrHd3arYVv4weI0oEpNKeu1iKQWlYgUxHqmagBojlRQV9Q/iAcYgicYL/Ha7lTYwyR4z732/93vv/d6bvmwCabqgyEvGBdlAGoAGvtGtvqyGukykG09kJGTwCjvS3BSODJuaMHsfbxiqXuZwAFUgFRXJQCChIjkStAPywHDg/6qI8jAjUYVNzYo9dgnpOogh3V62qccOFRxJNuxl9ggSRZuEbMDWqcTxT1QxDVsUAU23l9o1RUTYx9SRZn90c6ldUlgk4gcx1SAY0k0YphZVsJ9uaAhI9jIOiDp6NMsjwOKUThXdPMIrumFNLKa5D0CIMAKSocIKcszaG0sLaqmNRZwIDDSGyckoXwRrLI6QSgBRSKDM3ClrP1BVUYAgZ3d06oo8XkiGMFIqutw8lmNP4Mxlw5pswiSq6x3NKVxP2UaTHpqk9ycJ3QCCLOICESLAfDJq3n54oUEFMI5BiEKvrMzc4YmFPopujTYC2BReBAk0yFujQJM8rgMLn2umbAgSsrKB5svDFYyXwjEk7SR9ry0C1lMytEbzRX9z0WFkaCkCKhjDGqQyUFHiArJOLrmmowNyHVGpsjb9oDvQ3bRefyjlrVlvxkFAhrViMl0fqlFrYmRYpM0Etc5sp/hGgva6aKfX52PcBE1SJM6ZqDdDBt1c16lIzqivfmMqInVQzTBc19jA1DJiWCATDfUh3ddYw7W2mu4gUtvWyc1NnBxYX90AIw0BMaShQFuQbE9Wp9zORMuGuDvUBTtCAlPdqdVHNRALAX96Y7Mn2F5uw5TNhMBWJmtJ0q02JJy6DMNdUF3DC2l6bXojG6+JSeFgROl6MBzkw+2tvpYFnCnKQ1AF2h7K5aNy18S8YkQkxwzeGqYp324N6SqeF9SfwYU0TL1vBKsTHfs4WxicoaaGS8K+bSSIlWodifBmqY3y2BqBZnNSTreN9pQxTJnbbatrjIwHCmEiVxTmaxENyDqHxVkzPwhZyJtyHLFjgSuOwJHcCOD+5ujj0SRQUlV0RBRYWeNtROvcxiDqgwfm5o1QtBiQhXQ+rHUkPwvd6WQ3C02W5RPdEuVPuxghikzITRaOqJqSC4MJEZJuDTNO70TBMq/GMZwrRdAUQdGHkoSGSyEKkoDrmf8urC3dGsHlp6YudzDwpsELLuvKd4N6Z6GHhiQs41zsSzAuv9//9pWd5qEY7OL3eg4t9tLRQja0U9KnLncoQAxR+nhy3psQWGv2HnzTwXm8yIO8XhhlIeeNAhcHnSwLvBTnpwDyM2/h3SdAjJJrpqpoBqEjiHe0kbJmSyWQzG2eSoZ2Mx6cablNkKFosihsRoNKLge93KZqSFQAuw9yBASQR8Sc/qxssH19dWN94GAbsVBIRJM6937IyoouCxyXCSMNN8Yag6JisniFaigTqCVaq9utST8NGRfN0ZwPeZiojyXW4OU0j/a/7EZy+zcLRMw9Aa0DPFNpL3O5GHu5TQKVPg9uU/4t0pvJ5SrHppfsXbnt2qL8tQx/Ll58qvWo/BV189tnVh+s+KoXffTGmYbeO7ctvap4eeU9Wx/YHn+SmL53ak/F91scq1bv6Ms84Kg4dvYGbsffrUWrYjM3viBM7vT89edMokcnPt/y1Ndffvbtl+lzp3ue/Ca7r/fu/pfRVb9sGbQemtk66Ekvr1smFXe+d3ajTB49zR3evLdn6N5t9x/9edX5mek0ufnkF4+0kCf6Xy+JPTdzo3x90eMvXfjk9v7pujf6p8/eKljtJ3YnfigpevHjWFDgPhrq3z27dsV1fYPP/jucWnqmdG3//rqBd8u43/64lny3/7bQrl/f3zy1Zri8Vl1608slz5/46YPjzu8H4cDqrZ/+vaTqFSY9Bocqb/rn9qqp76ZeLe7MPHd+f3Oksqe06LHfD/946niYbrljtPivp0vu2LFn+8CKN6tKHNe0
rp1cee409/szm1DL5PmK2VHnSRdfd3oq+U173S1DO40L5cd6TmVPkccvwqriDx9ePrkhxA+kNiXvbNipnO+AP9z1R++t8FD31au7dv5W1Vbx46e7JmrP3XL9wZn3tq4IhyaqBi7senZ6Zb4ny4oyzKFhP27Qf7FZXVw=
|
||||
@@ -0,0 +1 @@
|
||||
eNqdVWtwE9cVtovbuCmQtKGY0OmgKAmQ4JV3tbJerpzIsuWY2JaxZPzg4V7tXkkr78t7d23JxpNCMm1TG6dbm2nKQDLBD4HiBxQSHFI8GVIaaGknkISODW1DJo/OZMiESaZJhhL36uFgD/yqfkjavd95fd855+5KdEAFcZKYO8aJKlQAo+IHpO9KKLBdg0h9alSAakRih+t8/sCQpnAzD0dUVUbOoiIgcyZJhiLgTIwkFHVQRUwEqEX4v8zDtJvhoMTGZ3Mf6TYKECEQhsjo3NJtZCQcSlSNTmMjNliHDGoEGjohwD+KgRMNfu8jxkKjIvEQQzQEFWPPtkKjILGQxy/CskpYJELgRA6jkKpAIBidIcAjWGhUoSDjKlRNwbakicRvJInPhFXjcsphSBPTRWLjb/46u40iEFKnYai2ZlPBABYiRuHkDMZYCdWFqZowQAYKtsPEoZQPWcF8KCoH00+8xIB579nYOFtODBt7enB5mF9OgSxO7SYSl5lFSsEoZFSM7NnWk4hAwOIQzwxHJKTqE4uJnwQMAzEnUGQkFnvXx8NdnFxoYGGIBypMYrZFmC5TT7ZBKBOA5zrgaMZKPwxkmecy4YuiSBLHsuoQqURuPU6m9CCwlKKqH/PhJNxVRXVx3CGigTJZKRN1OEYgFXAijxUneIDzGZXT568uPJAB04adENnu00czxhMLMRLSR2oA4/MvcgkUJqKPAEWwWo4ufK9oosoJUE946m4Nlz28GY42UWaT/cgixyguMvpIupGOLzKGqhInGAn70F8gJ+b54aEYViP6EE05DioQybjf4ZOj2EzV0K5hrAU8dyaR7fsDvsfnRfxnTsFwOdZFPxmIaIUG0mqoAYrBTJqLDZTVSdNOC22orAmMebJhAreV4UhAASIKYSkq5mVPMBFNbINs0nNbwU+mBMfVpNLHo0XAmCwhSGSz0seaiPrMxBNV5Ucz3UVIShiIXFc6rH4yrXxnV6yTZTSWjXR0CqSjy0JzQagxoWNZEzwCqTA4IUJAmBwrOZE9mec+iWslCYokSOpEjMCzCnlO4DCf6e/s2kH6cDFJklO3AlSpDeIFlbCQ6c/0QoQCBSxaKvZNNxaHw/GH24PmXdEY4rAVn1iMQnBhNpRZQFO3ArIuDpBoLDaPJjhWn3kAP7SabRaKtNtIyk4DlmSDdiugg5AGVruZCdqLLa+k9gGDvaTElCVFJRBk8I5V4/pMoQBiqTlz0VQxZpEkS/BqZHiNhX4tWC6lakAlBlmBvATYSSZEMICJQCLTf3qivLnWXVPlSfpxkh5JauPgb2ZzV7W2MqHWoOBSZU+V2xoHQnu8vbjGjTRLwL7JXS/5N1eihvYyjxm1uDvaGqjI5hqCwkWYbXa7mSYoE2nCU0qwgUoEo22g1WNTWpHVFmDZOMNuDDTEvU1Nzc1as6MOVfg2bWLKKnzmmKUOKqQnYAGo3RGtkarVliaOlWJ2WfXxUTcdEDyq6tjULlVX14r18VhluLjM29q5WeVsSK3AJeJl6yoqMeCGxfsSubJjQ+CxIVJDY3OS80NTYmDTxLhMi1dkieExfGf5RD5eYvCnGIb4F+9tP6dCV60kwplBTIzWwbEu2ttIosdpqoGOhsrsUm2jucsW0hyVDc0+r6XFvtlnCtdvjJV7y7xoATN2i40gs+RYSYs93Zo3U/8/s3q5iVi4BQifnLmcE6KERC4UGvVDBU+VnmR4SWPxtlfgqMdL1Lub9WMOiqEtFCgO2c1BOmiHRBneo/PevtkZw6mrIgF43HgdjH40QruMTouFNpYYBOCyW/GMpa/wnaOZi+v0t5as6c3PSX+W9PlrpEvk0pPXG/N/8oZn+u7u6ZfO3DkWOQ5c/OvmVcnqSxvZswP/Hhd+PVd6ppZf9/HPvvpp/o4dz7w/dOyuVUt+5z716OC+8oYXP3X2rxFLC0ufOHH9P9e+tnad/WT7J1xNdLzz4y/Ia0s/m6pwr7v4I5Bsua9qqKSp6f2ntmsr
ifBn6/s/6l15sPrnf37rV3tPjXx3wyFof/5vv71iuTN8Td+2+qE33aem7+ijXpbmpn37r9zzAO90VDys9hfkVz3/x1VNQzvyptCF7/89byB3hfee6L9OiIOrc9m+jd9ujK6/3HP9fJzs3bCn9MZ/lRvvbCU+atwy+8Hx0JaZL7e9Yh7q2v85+od10Nby+drdn5Y+t3UFs9Qy2SLuHPzaW/vg7PeczGvC+cDBs8eXX1+2/v6pgsuP/n7nHRMzuz9oeWzdue8sl/9Umje5/4m+viPP2vpfHGLe+PKF1p3mCuGvQ6/KK5RDIxv8vZdfGti9bHntO7PON/ddvTvaltxbby55e3vHvdoBcuvR3H3j/YZ734v+cnLlQOPbF5Y+/ZeO0zfGf7B18Nll5650XFs7EMrLGzo7J35YfFXsOtQ7p6/Jue/w2nMXZoNrT3/1mmdvxVz+3J6L3ZetS5dfLXqIfPDpi9L9r++zJbtXv/vcj8GeD+sLIm0bLn2RG99f23je+J5U8NYP837x7l1Y7bm5JTnanIMn8nJy/gcqPgKj
|
||||
@@ -1 +1 @@
|
||||
eNptU29QFGUYx5wIUyY/ZMWHar0ZKp3bu13uuD9gxHHkpUgccBkmgu/tvtwtt7e77b6LdxglSM2U0swOqEzK9MfjLq9LITGaTPtQFs1kMjZDkijqJHxIbGrSGG2g987DZHQ/vfv8+T3P8/s9T1usCcoKJwoLEpyAoAwYhH8UrS0mw1dVqKD2aBAiv8hG3BXVnv2qzI3m+hGSlAKjEUicAQjIL4sSxxgYMWhsoo1BqCjAB5WIV2TDo7u26oIgVI/EABQUXQFBU3lmPaGbC8KWjVt1sshD/NKpCpR12MuIuBMBJU0eyPNEEBKAaMQQBPCKKiK8EMiKrmVTEkhkIZ8MZHigspA0kX7ABVQyD9ehTJQ1CYdgUMKDIVVOVqEMVEvMDwGLxz6fsTTiFxWk9d81yiHAMFBCJBQYkeUEn/apr5mT9AQLG3iAYBz3KMAUV1o8AKFEAp5rgtFbWVofkCSeY0DSb2xURCGRnolEYQne7Y4nRycxIwLSBh1zfRjdYcy8gFs2WQxUX4hUEOAEHnNH8gC3FJVS/qN3OiTABDAOmVZVi95KPnhnjKhoveWAqaieBwlkxq/1AjloMR++0y6rAuKCUIs53XeXSzv/L2cy0LTB2j8PWAkLjNbbAHgF9t8m+XZKHGtlIikLSdGD86AhksMkI+IK2ofUwTkCeSj4kF/bT5vtH8tQkfCywu1RnIZUpS2CxYI/DsXS+/VRRdmc1DsjpVg27ZjHr+oJ2kpUMIhILglBmwtMeQUmK+Eq9ySc6SKee6rU75GBoDRgpZ6f24oY41eFAGTjznvuQzx9RCTHal/hdz1F01V0CWdRnSG6zOYoAeH1DkuTy30kRDK8qLIkwhcIydSwIaSNEnZLvs1szwdsvt1M51tsXjND2/OslB2Txlqphv1NHNDitIEmfKLo4+Eh52rSCRg/JKtTlGix0g0vOsrXOBM1ZJXoFZFCeoBPiwiiAKPVUMYqaPFUabzXMozi9CrHBm3Axpqphgav18bCPAtgAFmC12WOntvjR5JHkbr0ViyBjE0nFmx5ckdWRupbuK6yPnC2eMnsynjdX5fazTlouneYHlpdWJB5bnPOxYnOlpffz/3ldG7+8vGWM+c2V7SN1y/qoZv1/T9P9B2fGWl5bebPy+oXN29M28fEolVTT5y3VtR80PFs+coqNRu0bo8skuKVJ/tBa09obY13V+31gTHD9h3BOmFk75bfp25kLk60d5zp+zc8s8f/d0F4svLSu5mvPGObLhe2+bhFjTmbu2qvhk+Nf5NVFwKmQ9J7npUvfTLYN1kYr11/32JzieRqLjvw9pVG+Y/fBtx9F648UtSara3qLs593HH14sPktoUdKzqGfW8NudZ8P3Ry49kHm05X9zzd3LXkCHt9afY+hy6WuWldUceFLxPa3mVjnQ7X8Gf7iJFO7uLyE7ujv1JTEzX6YepRPel2vX68+c099ffH17zx2NABavcLVx7a/cB3X7efLnvqYFbZTyuWdU+e6vLtHPMVdg98+87la58X/3D9OXT4WO0MZnh2dmHG0WulN/9ZkJHxH+xyZNQ=
|
||||
eNptVH1QFGUYhxzDRi3I0rLUnQOyyXuP3dvz4BgtTzBkEA7hciATfG/3vbuVvX2X/bjghBoVZgJMW5qxKSYy77ijG0IuFC0/pkadMZWZJv8orBynj2HUP0qzHM2il09ldP/afZ/n+f2e5/d73t0eCyJFFbCU3CNIGlIgp5EP1dgeU1CtjlStKRpAmh/zkVJXuTusK8JQpl/TZDU3KwvKggVKml/BssBZOBzICjJZAaSq0IfUiAfz9UNtW00BWFet4RokqaZchrbazKbJFFPuxq0mBYvIlGvSVaSYzCYOkyYkjRy4kShSAURBagsppqAH6xrlQVBRTY2bCAbmkUjSOBHqPAIs8EOhRgdWQkCzdDaB0lBAJvNoukLwaQvdGPMjyJNhLyalRfxY1YzEfQPshxyHZA0gicO8IPmMT30hQTZTPPKKUENx0p6ExhQy4jUIyQCKQhD11wFVg4IkkrmAJgQQadX4pMTlri4o3LCmJDoOavRBWRYFDo6WZ21RsdQzMS3Q6mV0fzg+qgkgQkmaccg52WZWaT2xQ6Joi81hofvupRYh6Tgqj8WP3BuQIVdDcMCE1UZ0vLj33hysGl3FkHOVT4OECuc3uqASsNumTano0uigRiyv9H66ieBdOtbCWC05iWnAar3EGV1eKKooMeXBVEmcGMkC2g5o5tA0aKQp9YDDhMH4mO6dFFBEkk/zG2GGdXQrSJXJBqMdUVKm6er2CPESnTsdm1i7fa6iyU3YGcknrhrH3H7dTNF2qhgqFCFeTjH2XJbNteVQBcXunrwJEvcDXUq4FSipXuLUmsmliXF+XapBfDzvgesSn7hZQOCNo+S9mmaKvF5vbQUKWiX7entdTijEhoLlzs/v6oIVH5SE0BjtaN1QBuuws8t51gOQx8sDmyMnGzgcVgZ4rNYc3pbDZNt4ezgoQCPOWBjKh7FPRPs5L+Ag50dgXBojll9Z4iwuzOupAGXYgzUVuKHPiEhYQtFypBA3jDgnYp0n66+gaN7LoMxZaRxwMBxrYzia53mbjeOsYDVZm0mZpmSIjN6dsd/ANmKFQo5OJbctaZuVNPbM4A1nzY+r5jSPtMbPVh1t6y9Z+EfmO+cfLnseFT32kye6eIj9q0JZ8YE++NnIse8FqmvptuM3b2RoTpw4+eLAK2/82dXx7dlrhzF+aeX80PBx+5Lfd33E5H2Y8qr7ibDwCPtspnnuvPDuLRXMiSrzTHPi0ecqL8zf6Gt4b+Cfhjc7Ftxa6a2YvSzhufXvdfv6yzd/3lTUkrJQmPfVly5r+p32XXuNNG/hby2uO4ODm5uzv7g+6/y+1rLXryWX3Fl7sNBYtWjw0rn3q2K2/Nuwdgl/Ib/UdyO5c1WBa757755Oai5dr2+mbqd6n3p8x3B/U//w38sDKSlPdxfO7sx0preAupnh1NP5zRdTnvHSja2dPyxa8e5q1zeJdZc6Th1IpDYUFPVsDLLmK/7XZgxc6V56pOrYiau754TPlL7N76P3rBWXFaf/Up16OTPtu6bMWKp13VXfmdmhxe0bXlhUe/J69HSf/WA4vvjr7gW9/z35K27I4EZ87Tcah48frhohqo+MzEhKOed+i30oKel/M3dwnw==
|
||||
@@ -1 +1 @@
|
||||
eNqdVWtsU1Uc3+SDssSoUUNUkMvEaHD39t7e29eWGreWsSmjZS2w8rCcnnva3vW+eh9dO/CD0xgRFS8xSqKMx0qrdQ4IExA3o6JEIxoFNRlGo4mPmPhAIxFExXO7TrbAJ++H3p7z/5//4/f7/84dKOeQpguKXD8syAbSADTwQrcGyhrKmkg3Hi5JyEgrfDEcikSHTE2YWJQ2DFVvdjiAKlCKimQgUFCRHDnGAdPAcOD/qoiqYYoJhS9MPLahUUK6DlJIb2wm1mxohApOJRt40agKMEMAQgMyr0iEbEoJpDU2EY2aIiLbbup4/cA6vCMpPBLtrZRqkCzlIg1TSyi2r4x3GfzWDQ0BCS+SQNQR3jCQpOKWsKMdi6boB8ppBHjc8JZiWtENa2RmC3sBhAhHRzJUeEFOWS+n+gW1ieBRUgQGquC6ZVQFyKpkEFJJIAo5VJo8Ze0DqioKENh2R6+uyMO1PkmjoKJLzRW7NxKjIhvWaAgX0drpCBcw1jLBUJyXovflSd0Agixi8EgR4HpKatX+2nSDCmAGByFrPFqlycMj030U3drTBWAoMiMk0GDa2gM0yc0dmL6vmbIhSMgqB8KXpqsZL6ZjKYahPPtnBNYLMrT2VGk4NOMwMrQCCRUcw9pFj0zhIyI5ZaStIYb1vqAhXcWTgx4q4WOGqQ8UMRfo+Lvl2gjtDt03ReKXdXOKQcyLNR5Nm00E4yFC0CCctJMjGK6ZdTazLLGkKzocqKWJXpaG/VE8fHoSU7F4ivYyTJtyBvGVwGUJH7cJx93Y5eM5JVFeVXRE1qqyhnvI7kntkJ3BA5PTRSpaCshCfzWtNV5lvq8/38dDk+fTuT6J9vVzrJBAJkyO1o6ommKnwQWRkm4NOX3OkZplCvsK7pUmGZqkmSN5Eg86EgVJwHhWf2sC1q2ii6bpw5c6GEoGYamXObr6vD7dQ0MSJs3OfTEM5/P5xi7vNBWKxS4+j+fITC8dTa+GcUr64UsdaiF20/pwfsqbFHhrYiFexHm3h3ZBt8uFmKTXyXsTTg4kII+c0OWBnkTyVSx+AeIoNpmqohmkjiC+rYyCNdEkgbytMz/LuFg37rSFEGQomjyKmImgYvegtxCqhkQF8HsD7WQAwDQiI9X5s8rB2LLWrs5AJYKLDChKRkBbT9XPisdhMp6Q/HRHpA/mvWpvFHa3i+YyJpFd7PH0rHAFdfeSju5YXI9qHUIiLrRDkvE4fYzH5XKxJEPRFEMxZNCk4tlsXvMtWZ7N0HGxk5MKS3v41tiSsHul2C73JpS8V2hzxnrFMMoloRhd1RvvRRSr9MhdIOsNLA93t6+iY7Ivn08AaXkgkAyJrbgbYKT9jhYCz6aA8fXXFEJihZC2PlzN9JQ+Wgi+ioGfmnkbthAd+KIPyWKhhYjYYCL8BhKKCAbyL1NkNPE0xsDMCbyfW8kqYQlkc21L+6MhxpmRcj1UXBJXOQOY1tXBpaxB9QfYjmhvbBoINMaBruHgpjlvdQovlv4/qzrYQ04XPBlSJ79oZVnRZSGZLEWQhgVkVaComDy+2DVUwpx3t8asUS/P0ckk4GiIEAehm2zDV+ZUtP+uh6L9VSgDEc9YDloH0qy/sZnj2MYWQgJ+rxvLqfrde7Bkz6Sceqc+PH/zVXXVZ5b41JvLjtLXBn851/D+98Xegd1t8kfXv3DVplva7tS2dyXUsZHx1qcrF558viSFN/wKBlO5XN91cO2KsfVrDw7wKVO7MHqqaccHv60ZEz1bzvz00rl15IlDN95tHPkuq3715/UL561jN23/9J6GD+rvu3KAKJY2/0it+6YwePXOow3GPtczW5+LDYZ/+Wzb8ZNN87eTi2YPzvnRn3h8/xenH3myNDD//n2FjdHm0/dzb2w+ecexw4F5N1BjqwYfXnDrjl3BwPpHTv2QaJj7yficXdt+eOXWM38LW48da1573trx++qbvv38xIOn022VO0Y37jz/6P4tYOifeKc8L/fMvW856AV0
1zXZvzYenT33pgWz/daBs/f2xOpn7/n7G2Jtw0Sbmftw6L3ks7ftXTz3YKT0asemYwt/PjS4864Muln97MWv3z56+x8fz8fQXbgwq27pE9edBVfU1f0LdzdoqQ==
|
||||
eNqdVXlsVNUaLzQIUZOnL/G5gV5G8OVp78zdZm0mtp12Smk7085MKYVoOXPumc5l7ta7TGeK+CLU5UUN3qYxkryEpe2M1AI2rYKFGtG4gGiCC6EaMS4Ed1/Ce7iDZ6ZTaQN/vfvHzD3323+/7/vOlnwaabqgyAtGBdlAGoAGPujWlryGuk2kG305CRlJhR9qCUdjg6YmTN+VNAxV9zkcQBXsiopkINihIjnStAMmgeHA76qIim6G4gqfnc5ssklI10EX0m2+9ZtsUMGRZMPms6kCTBGA0IDMKxIhm1IcabYKm6aICEtNHZ8231thkxQeifhDl2qQrN1JGqYWV7CebmgISDZfAog6qrAZSFJxBViKrSk7tTmfRIDH5W0bSiq6Ye2bn/B+ACHCHpEMFV6Qu6y9Xb2CWkHwKCECA43gNGVUhMMaSSGkkkAU0ig3Y2U9B1RVFCAoyB0bdUUeLZVFGlkVXS4eKVRDYgxkw5oI4ySqGxwtWYysTNB2F22nn8uQugEEWcRQkSLA+eTUovzQXIEKYAo7IUusWbkZ431zdRTdGm4GMByd5xJoMGkNA01yceNzv2umbAgSsvKBlsvDlYSXwrF2mrF7xuY51rMytIaLJByYZ4wMLUtCBfuwdlH7ZvERkdxlJK1BmqGe0ZCu4j5BW3PYzDD1LUOYC3T8zXypYXaHG2dJPF1241At5sWaiiXNCoJyEc1AIxiKcRK0y8eyPs5J1DfHRgOlMLEr0jAWw82mJzAVdbO052HSlFOIHwlckfCpAuG4mkL6uDFJlFEVHZGlrKzRtWRkZlLIhtrxme4iFa0LyEJvMaw1VWS+pzfTw0OT55PpHony9nKsEEcmTEyUTFRNKYTBCZGSjsHxuPeVJLPYj+BaKZKmSIqezJC4z5EoSALGs/hbGlfdGnJSFHXwcgVDSSE82HmOKj4vzdXQkIRJK8S+5Ibzer2Hr6w064rFKl63e3K+lo7mZkMzkn7wcoWSi92UPpqZ1SYF3ppegQ+dLpZ1cpDj4jDOMF4WxIsviEpAlmHdHs+LePIFiL0UyFQVzSB1BPFuMrLWdIUEMoU587O0k3XhSisJQYaiyaOoGa9VCjXolYSqIVEB/H6YICGASUTO9J+Vr+0IVTc3BEaiOMmAoqQE1P/hgps6O2GiMy75s/VaeyYRDMqNQTWaFnpqBb7V093U4wm2tm2sFzrjmmK2dToTTrWDpN0czeBkGSdJ2yk7nlIykIRM1mzqcbbQNQ0ybBfYVLfQ3hRKhRqYWF0rC72U2KyC1WGvN9YekutWoTrV69WgnA5G9HgrQJH6SMToMatbYyiaashsVJyt7cy6auhpCTe2grRKi71rA5ybZwolAiPpd1QSuGEFDLq/NDYkHhuyMDRuHzU7NJUEXwTGb5+/IiuJVXjXh2UxW0lECwgj/A8kFBUM5A8pMpoewMCYaYH3h1J07cZYPO10rWpLsiE+HKrrCNXx3XxNZF04jbhwd2e0c218DdMA5yDjoV0kVQLHRXGeYmteSv3/zOqFteTcLUCG1ZlLLS8ruiwkErko0vBUWSNQVEweb3sN5QJBMlLdYU14achydJx1JRKUh6dpsgbv0Vlvf+6MocJVkQcibrw0tMaTrN/m4zjWVklIwO9x4RkrXn0P5gqNKne9tuDB2x9bUlZ8yh/vp5VXqOse+vG3q9/Sq1YM/BWlFr+/q+m7qrY2a8zxrz1w/flFNyy/sOlMX//OyJ7Gh3/9furwoXO0rW91tV6TfftJ95rm28a/PSMcuG/42QPfgnOTnxw9++rmQ/cN3Lqt6vOd1EfL1N9am18URhcOBH98unLDkiPOjtMrv2Leerdu8S13VC3qQN3MDvudpyb3bj/eb3SvORnU/sP9/Yfrl/ctPaS8+cyi+0/8+/hnO1aXnx6/OvmAYOvzDdY8xPwwXJ+zXi//InjHl/b05NLyxeibjiW59cPv/O+9M+FjsZPb995zrnFqYN0vk+rLh08c+WBwbGIw
/+iT2+Td5+9a8fw73N+u4Xbs3Ar6/5s2Tn1a9h4b2Nr0BHfup4c7lpdtjz0wcd2xm/dXX3sMbBq94d3Hq7hbfu55av3p309F2i+O3V52dsP1tWDZkqP7zy77cGnf4vPtH6HX7t3w9ZHvVj6ycOVB9aoFK1Nt/+z9+i+37frH6j13qyePfn/h4+3hE26M9cWL5WXjb4CnLiwsK/sDSUeFGQ==
|
||||
@@ -1 +1 @@
|
||||
eNptU3tsE3Uc3wRFGLKpOIcQuTTDsLhre2vXbSUIpYPNwVjdinW8xq93v/Zuvd7d7q6jHS66Bw8zEjglEubGa31R92oQpkNChCwBo5g4CA4nM8BgoCQoII9g8NeyIQTur99935/P5/utD1VDUWJ4LrGd4WQoAlJGP5JSHxJhlQdKcmPQDWWap/yWkjJrm0dkBmbRsixIRo0GCIwacDIt8gJDqknerakmNG4oScAJJb+dp3wDO9ap3MBbIfMuyEkqI0Zos/SZmGosCFlWrFOJPAvRS+WRoKhCXpJHk3ByzGQTGRliAJNoXpQxgYduDNh5j4zZIRAlVe2qWDGegmwsmGSBh4K4DqcB4/LgWaiXVqfNiZWUoVtA4GSPGOukVWtrQzQEFIJ+LiHFT/OSrESfgtMFSBIKMg45kqcYzql0OGsYIROjoIMFMoygOTkY50uJuCAUcMAy1TD4MEvpBoLAMiSI+TWVEs+1j+LCZZ8An3ZHYvBxxAonKz2msTk0Fh9in0Mj6wxqbbcXl2TAcCziD2cBGikoxP2HHncIgHShOvioskrwYXLn4zG8pASKAVlS9kRJIJK0EgCi26Df/7hd9HAy44ZKyGx5ut2o8/92OjVBqHOiTxSWfBypBByAlWD0EcmPUiJIKx2uNeBaoueJ0lAWfTjJow7KHm3nGIEs5JwyrbQR2bqwCCUBLSxsCKI02SPV+5FY8IfjodEd21uyeEzqzf58JJty2Ep7MjEiByshZSy2JBihN+qyjHodVlBsbTePNrE+U6WoVQSc5EBKLRzbihBJezgXpCLmZ+5DZPSQcIZSvkXvCi2xoDTHvIQp9HIOU3nVkvwam6m0Sqo84MVJlvdQuIyuEOJxsF5ZGcD0kIR2u94BCSo3R59DGewIuJ7IgiBP58jNJtuqGaBECDWBOXneycIu8yLcDEga4mVxSpRQfvlSU/G75vYP8FLezssSbgVOxc/xHAyWQRGpoETirdFeizCI0ktN5cpXuZRe63BQlCFXn62zG/LwBWhdxuh5BN8fO4r4tdchCURk6ku8PrPpxYT4Ny5fKeF/1U7uOzJpz2J10VbDsUl3vgYf1T23SjPOmFzH9h9MbbpZWLna8ueh5KLBa+lba2ruz7060HdymfqKMDg02NJ//87N4VsTh2rfPHvst89Gzgxvv3HypC71tciGxNa1R8uY8afDqcdb1n4yp2Xl8NmMi1N6Nl5LO5/B87d7uv7Z+G/rjU6YlS6d7rn8esGxoeFrw+HBgqmN4ZT5BakNOwgTnTtdn5b/ZXNlb/I8C37wzOcTssEbmGXCW9Nqi7fMKPUnsvkj5ZbiuZlVV9V7V6eszZ57vf7nyxMvdYc3vpM0+3a1oj+adLf5nFhU13t+Vlh38FLTlWnpJd7ozEad59DRWztnTV54dv/8BtXLTfypqP70eHNpRX9DN3C9vWZqrhT8rmth+oHCjoyWFMbWXztj0ZwjjS/tDrxA0sLFfTeya4+betMK69btXlPeza5PSqCydnZ+vGukfmVq3YJw74aMmb90XOjyfT+F6HP9/eFfrfYt09eX2ye9OtJ878d5EanSey+5+73C509tjxoLMpYC0ybbJiI0f/rh4MA384YOl124Oztt24llU/bNWZ4q/rH+/E+bb9pO7Ct63xfIyzMWBX5vpJd1bNthsyx/peLTXa3NtnYK0u1t2yqW16sou3VFEpL2wYNxCaGA84vLzyUk/AexLZt7
|
||||
eNptVGtMFFcUxtCSKlqosRHtD4bVPmKZ3ZndZdndii0siiiPhd2UajV4d+ayM7A7M8zcQYFaK7aagFoHbNWaGpF1wQ0iBEWRWpVq1dZXGm3VWLRNg9EIplKD9VF7QVCJzq+Ze875vnO+79ypqC+BssKLwqhGXkBQBgzCH4pWUS/DYhUq6POgHyJOZAPObJe7TpX5i29yCEmK3WAAEq8HAuJkUeIZPSP6DSW0wQ8VBXihEvCIbOnFL8t1frAkH4lFUFB0dpoymuN1wyk6+8flOln0QZ1dpypQ1sXrGBE3ISB8kCfzCBKAUDhRRoQkQj8BPKKKCA8EsqJbuhDjiCz04VTGB1QWkiaSA3yRShoxCWWiEjEcgn4Jz4RUGXNQemppPQcBiwfuCnstwIkK0lqeG2IXYBgoIRIKjMjyglfb6S3jpXiChQU+gGAItyjAQZW0UBGEEgl8fAlsXUIqCPCCD89GIt4Pcavajqxsd35a+oczs4KPQbVmIEk+ngED5YZCRRQahyYmUakEnw+HBnQhsVgC0vYmD7dpcJZiSwSC0ptteqr5WWofwB0HpcF4x7MBCTBFGIccslsLPi5uejZHVLTtmYDJdo2ABDLDaduB7LeYR0wpq8LAoFq9w/k83VDwKZ1JTxv11pYRwEqpwGjbC4BPgS1PPHhSEsJGmkjKQlL03hHQEMmlJCNiBq2WahoW0AcFL+K0OtpsapChIuEthiuCuAypSkUAewlPHq8fWr1t2XOHN2F1IBW7qh1wc2o8QVmITCATmDiBoC12k8meYCLSMt2NjiES9wtdanHLQFAKsFMzh5emnuFUoQiyIccL1yU0dLtIntW+w+/5FO12leQIljkSci2WzB7RmZPqRIUZ7U91EWUvEPiyQdqBuotTTTaLKYE1eUjoKWBJs82aSNpsRpr0GI1W1mylE82spa6EB1qI1tOEVxS9PrjLMYt0AIaDpGtQGq0+dV5Wcma6o/EjMlf0iEgh3cCrBQRRgEEXlLEbWojxiSqL11+GQVyemzxP222jGZOZZm3QVkCbPFYjmYLXZlimJzIEBu7O4K9gObZCxkdHR/XGVr0SNviEs9XZ4jkq+ujBn45P1Hedb+uYsP4N4u/sadOkDyq2flWTMb71RMbGSa3jzn+6+K/dkSHrvC39fZv7p15pv7Cvc82lnsSJefce9N03xAp38vqLux72HuxtkqZbb/m2UK6GyXbUNTqqNNpdN59QutZcvnp1TF5UXMK+S033oqN4tb39sr9ts/d0xzcn1p18/Urv6bUr9/7bI0/oJcekuF/ZmlR1+Ig1pcE6yR2TWlU9Z/+r7zvJX/unRBV2nkveGpEx/l71jOKYKeqipJSfKwpv/BGzqmD5/OQFG9APaest46gv6sq4ms/kosjc0bEnNvxTeYx21hx4Se28NN3ZPPVuau3ps/a3y06N3z92T7yzdvaRmzF7zjgSDlMhw/VZF5rbVnE5i7pTF76VU7jAEhvdek29Wbb5zLhw7vsVtPtC39m71V0BInNs4FBPrrnfEee+s+74Eu5r3y0uJy2Ljasua+iG22I+STf0RRxq+bPgv7zMa8U3NnWr6Ya1+yoDrvUb61aWvzP27NV1jm+THngqQ7MTujOWnfpx9c7fr3csLpze8/IKY3tnXEL53JTeZOeDTbeL8pyTI279cvu3yvcm8nPaEmcQO97Vt3Wdr+0tzo+MqMq4f+xQd2Pkyu5la2qS2srL7A9HhYU9ehQepj6sCEaEh4X9D/5zr+U=
|
||||
@@ -1 +1 @@
|
||||
eNqdVX1QFOcZP9EkpU1rRDM41Oh6UTDIHrt3e19cz3ocCIzCAXfgSWKve7vv3a3sF7t7B4ehMWiNtmPsoqnVdho/4I4wfEigiDE4dqqpydBMJtEOpB+2aWNorNJqM62Jgb53HBVG/+r+cXvv+z7v73me3+95nm2NR4AkMwK/oJvhFSCRlAIXstoal0BDGMjKnhgHlJBAt1e63J5TYYkZzw0piigX5OeTIqMTRMCTjI4SuPwInk+FSCUf/hdZkIRp9wt0dPzwTi0HZJkMAllbgDy7U0sJ0BWvwIV2q8QoACEROSRICiIKgENIvxBWED8gJVmbh2glgQUJy7AMJG3LdrjDCTRgE1tBUUENOiOqhCW/kLDl4S4O37IiAZKDiwDJygBuKIATYXLQMIGF6cwt8RAgaZj6wfaQICtq7/xk+kiKAhAd8JRAM3xQ7Qk2M2IeQoMASyqgC2bAgyRValc9ACJKskwExGZuqadJUWQZikyc5++QBb47lTGqREXw4HFXIjcU8sMr6qALBuEoy6+MQtZ5BNcRFh12ugmVFZLhWUgjypIwnpiYPD8390AkqXoIgqYUVWMzl3vn2giy2lFOUi73PEhSokJqBylxJmJg7r4U5hWGA2rcWfmgu9ThfXcGHY7rzP3zgOUoT6kdSRnOzLsMFCmKUgLEUE9gvbP8sIAPKiH1FE5YOiUgi7CGwO4YvKaE5dZ2qAUYvRxPFdNJ1+ZZEf+oyWwvgrqoI55QOA/BzYiLUhA9picQnCgw6AsIK1JS7ul2ptx4HipDv0cieTkApSielT1OhcJ8PaC7nA8VfCQhOMwmET6sUxQ0iYIM0FRUarcXrZ7pIrSsaGCmulBBCpI805x0q44klW9sbmqkqTBNhyKNHGZtJgyMH4SpwGDqiigJCTcwIJST1Xaj0WLoTR3Nkt8Fk8VQHEMx/I0mFFY6YBmOgYQmf1O9nLiLYdjwgwaKUA9g18cJLPmcn2shAQ6qlnB+H4awWq1vPtxoFsoATaxm4xvzrWQwNxpcz8nDDxqkIE5icnfTrDXK0Or4Grjw4Sag12MGvQGzGHHaBEgDbcIIU4DETAHcSBjOwu5nKIiSUFOEYwWVAQUHlxJVx/M4sinRaHYDbjSYYKY2hOEpNkwDd9hfJCRykG2IKAFWIOk+5ybUSVIhgLqTBajGi7ZVOMrLnF1uGKRTEOoZ0PbhgoU+HxXw+Tl7hc9fy9T5S32F1RhR66T1wAvcmLs+Uu4p21Fo2rqNrm6y8GwEgqC4WW/FzUajyYriOkyH63AUc7DeUl2DvnSrt4hqCoU8ir6q3IeZiRqm1gkUhfYKdbUyXlYbDpbiUV0jHthSEyqudkTDleEStqS42FVY1iDpCjlno4Eobja5OaJGCMJsSCVkz7chsDgZyK891SIobBE00SDGAmy2QWwIneTArps/Dm1IKZz5Lp6N2hB3gkwA3yQH3HB42ysEHowfhhyEIwxtr6PqTMXVjUa+wRgtD9Nuk6NeEvVNIu4rM4UcZEUVtQVgeKPZUDSXBINRj2IpHqCYlmQV3g/9/4xqyIvO7XjUJc583OK8IPNMIBBzAwk2kNpFsUKYhpNdAjGoebVjmzpooQksEAAWykpSlgCg0UI4M2fR/jcf2hOfhTjJwhqLUOpAyGDXFhCEQWtDONJuMcF2Sn4CX4wlapIPXkpzrfrhVzTJZyHb5hJ+hz1+afLusj35IxbH+wh67Nl+cuMZ8saUxj7qvHB5sHtT7Dm1N3PZl7d2r4j/7fR36F2uyWBbs2vprtFXxrP07lPHYp99cOPgq6vOD37x2NkD525tn75564aTO3IzJ2jbqX789pKhf9P/mjh29L3Bn195ZMPl7IzctZ9+dPvejoazddvzHjuRvvYuPdBTcWTgqufc8Nc/uPvR88aCG396vGxor2+pZlf8z1vsUtkLrxtH71Adq5cMHu7x6RbQ+H88b1de3K+t//GrRUu9+18aEpYPZncWrqlat79uo+5E4/nR
r02tPL1k4/LrE1rHx+uysnqJd48MtE7+7LeV4SWVxy8Ob7721it/Z3sQW0lb5r2B7LGMTRPq5h+ktUifdcZ6CxYP5hx98rl/LD7w4s19Z35xSx471Lq21nV89V8fufbWcfpK//dbJsZWejd8K6NUc+Cp33z1PDnx1PpLhgVu2+Qd4vrKHWc6Lncezry3IcOUvjT3R6aCoWDa2F9ettSs9xQQT144VJjz3nefX7O4M/26dmxfttp7dk/p+23W3Nbs6vSekkPIqsJFR0w3F33+RIs1svugQlxZzvx+2Us98RVZb+ZUBW83ZP7q0WDfNbz6anpsRcMnX3pf+F763sVDB8j1p6aq+1bYXjOrlWnxJ/btNX3zyifpFyZX7/rDt4++8+HyY7XmrMjF9GHvSnH8mds5//zUv8y47vOtsbs1U4/y7raTd27n/vKow+2Ovv5uTl/gi+m4sPobd6xD01XZTz9zaGSd8afv9Hf8RHv819s3uKbSNJrp6YWakrrXrmYs0mj+C91Z35g=
|
||||
eNqdVXtQE3cexwd3Fp8z0mp91DTYuaGyYTebhBBEB0JA5CmJAvYc3Oz+kqzJPtxHgOATtVpRe+uJelrbUZA4FLCeUK1o1XpqK56jrcphxbNnLa3a4mOQ01O8X2I4YfSv25k8dn/fx+f7+Xy/3y33e4Eg0hw7oI5mJSAQpARvRKXcL4CFMhCllTUMkFwcVZ2bY7VVyQLd9q5LknjRFBtL8LSG4wFL0BqSY2K9WCzpIqRY+J/3gGCYajtHlbbxZWoGiCLhBKLa9F6ZmuRgJlZSm9T5Ai0BFaESXZwgqXgOMCrCzsmSyg4IQVTHqAXOA6CdLAJBvXhejJrhKOCBD5y8hOAaPSLJgp2DdqIkAIJRmxyERwSL/S5AULCsD6tdnCgpDf2B7iVIEkB/wJIcRbNOpd7po/kYFQUcHkICtRAeC4I0KLVuAHiE8NBeUPPcS/mM4HkPTRKB89gFIsfWhcpBpFIevHxcG8COwNpZSWnMgSCS0mNzSyGjrArTGDAN9lkJIkoEzXogRYiHgHhq+OB5c98DniDdMAgSUkupee7c0NeGE5XdWQSZY+0XkhBIl7KbEBiDbn/f54LMSjQDFL859+V0ocMX6XANptUY9/ULLJaypLI7SPmBfs5AEkoRkoMxlJ1oQy8/HsA6JZdShWHaPQIQedgfYEUNdJNksbwaagHOfu0PNcqunIxeEa+FjalOgbooR2wuOUaFGlRZhKDSolq9CjOYcNyk16vSsmx15lAa2ytl2GcTCFZ0QCksvbL7SZfMugFVa36l4EcCgsNqAvBhGyKghOdEgIRQKXUFSN7zCUHSU/Y/7y6EE5wES/uCaZUjQeWLfSXFFClTlMtbzKDxPh1O24FMOhpDLrzABdJAQAgjKtWYNl7bEDrqJb8WFosiGIqg2KESRIBceGiGhoQGv0NzCn31KIoefNlA4twATrRfhwavL/taCICBqgWSvwiji4+PP/xqo95QODSJj9Mf6m8lgr5oMC0jHnzZIBRiFyrWlfRaIzSltE2GN0V2vcGoi3Po4wzxFEUSqENH2YFdq9WSlFZnB9QXcNBpEkYJqMnDpYGIgIRLSSpV2mIYoiQwaIk4pscNsNIEFc2SHpkCVtmewgVqEBNUvAA8HEHtNaciZoJ0AcQabEDFn1KYnZSVbq61QpBmjnPTYOOVAWOLikhHkZ1JTPXN0ZuLc7LF/NI4S7bsJswsmeop8aVnWniLU2P1YLIXnSkXoq4sBIvTYdo4oxHXI5gG1cAxRdLlTAnLTVvAMVq7MX1uqY0pQnNJa1pWBp6Ke6y0xpuRnikasyyOvDxZnwL4gplsbo6DNWcnZZC2DLMnUwDmghRNYUlSqV7rnTXbrc9cSBZl0njSAiHdLhDOTCLeNzfXkFIISyQkV2Jsggp2LA1JTwzNDQLnBglMTZwJ7Z2aBBUVJCZR039HJqhmwCWfw3pKE1TWAMMA/hIMsMJ9nZjNsaBtEyRG9tJUYkmqRqPnM7xakSWtC0k+2UX7sBm+uZTb4mSsKTZu4RxristamGec1YcZFOJBQ+QYUJ0x2JovoP+fqD4vQPquASSHf/4287OcyNIOR40VCHCqlFrSw8kUXPcCqIGNkJdUqDTGYySuw4ABcziMuN1IIclwkfZG+9/SqA68K/yEBzael1T2u/BEtUmnw9UJKoZINBrgjAXfectrAo3KOk8OvDOpYkhY8BoEP8+erbN9v3Z82qjFrfmRD6ZOefivk7cXt4xb9Jph8N7jr80aVf/buKzT7bPNs/+efcrx9lHlny1bVrLfbNxs/NOYebeExx9Z5z2VB5z/2/mryTfKnNNG/nLnt59+6vF3c3fbbz79w69LlnTd4HvugGc/nGiPfXhf/m7ziGtPNlz7UkYSO97eaamZd2Wo6US5796TtpM3H2zf/umn276egFZ+/ut8Z37LbXw8OOHsGLOl9T/nPq5KY9gfWsPDjrU+LjTmHZu+7T0jd9SirT2EGA6sVi9LNVbOuGGr2hbl
vkJ+i7+Zt/vp5mO+lctWdEVcvbR04tDapvmad/jI+VM2JDfvmqQ6zXC4jahSDWmua4qOj2h5ffHay+a4CGP4zXNnDgyaKKYuX69acnxgyoCLg2l3YkHnsMNR6Fvvr5xfvqfl9fqfR+ywzdn+45OOmSBq6qyn+q4LEZfLlqxpDN9hd9LvH8yPxCnHzFV3BcuksnUDjVVf/Xi2u33rlbyx4drp81O14VlTm6b4Ms4nfJOJZ1pGzK5f+uzRluQtJbUXpz005nd1NZ6N+ktFdWXhB5Gyxt28q3jf/RsXHnZkYavemGk+FbbzMlLxhaljuH9X9/Ws5aMvblVvPD0x7vejMyJ2n+veIHdmZEab7hf80o5P1o1tkkb+fLtt7erj0Wv+XN69FnCFkWMHN308aHTymOhlK5KursubQM24NyCja3TxmXeio+p6bEzzowjH2fGbFpz+XbtL3LnIXtNJRrR8tz36H+tvnXGc3/rttJ6Sg7c+OLDibmr3I/PhisimoqEm8G7r3PWraxyTxoQdvVQ57hNPOj6k4/jlU58oEddPXhrREK+5R98/Oc3t0eyY3BD7fef1+kHbKtJzNv/13rnusRvqTw/3kmsWbSz4aE9P3I5OonNT2pWeSf8mvyqbMOyEb9SwC9MrF8WMjvnw6tLKiX9sWDneUrrmrdY3Zp2ZPNDLZjZ3+C8UDn9wqKLR8ubky3F7sh8vvDcy2NKDwoatcuwtDw8L+y+SEjoQ
|
||||
@@ -1 +1 @@
|
||||
eNrtVUFrJEUUZvGPFIUgyPRMz0zPTNKSQwhhV9wQxSjIbmgq1a+7a9Nd1VtVnclsmINxrx7aX6AmJEvYVQ/iRRc8evAPxLv/w1fdMxuNkV32IAgODNPzXr33vvfqe18fnx+ANkLJW0+FtKAZt/jHfHF8ruFhBcY+PivAZio+ub25c1JpcflmZm1pwl6PlaJrCmGzbs5kyjMmZJeroidkok73VDz7+TwDFmP6xxcfGdDeegrS1t+7002cV856frff7Q9Wvl3nHErrbUquYiHT+ln6SJQdEkOSMwtnrbv+jpVlLjhzGHsPjJIXG0pKaDDXF/sApcdycQBPNJgS24DPzoxltjLHp5gXfv3lvABjWApfb7+3BPf5yceC1RcIg6RKpTk85QonIa1nZyX8veQZtoIzq8+rA8GVlj84bMZ4iMRqlXtb7NB1Wp+Mff+na771PFdTb6sZqKm/evubjUWpuyBTm9UnwcroxxtjtrVIhay/vCQ3ujc0xJhHsNzUp1ZXcBrj2OrnO1nVIf0J2eaWDPxBQPpBOByEwxG5vbXzjDOegcfbTPUTqbzGcr6eW+/DA15fdrPhGg2DYEjfIQVbG4xWB77vd7KhN1i9wfH8GrbNw1IZ8O60g8Z+b57Hlb+hzad4Zxo58Nut34/ogp00pH530h0HtEPxNgCvNoLDUujmXiIrCqChrPK8Q/eY5VmE8UjeCHtLRErDI1phRFHlVpRM2whkXCokPA3dsDrUcJZDVJXRQyMeQYT10xQ0Dfuu3SuvtJlGsCbKBRIY3eOlM1ZTGUkoSju7ig7Q69ItTze5XhiivZkFQ8OBvzrpjwb+vEOFRLpKDhGyPjUONq4MLqWFiIkI91HPEDrbyyFeIlc6jTiCauYQC7NwJsgE11emppG1eVSJZYDFHccOBegorhbzi9msqZYrmbr9wQRBAzZT2i4M/QABGmAap3sNw1TpfVO6tIarEiKHScgD0bS3RDKMjFUad++v0fP5P0vN2sukBr9oNT2dFz1M7ZVa4Q30nGQY+78GvZ4GrQTBv6JBwX9Bg964e0RblkUZMxnq0MgPggFL+sMhjPujyRgmwWg45uPRaMwhgX7CeD+YcBb7w0ESDCeTvbHPg+EYxjzme2NABSuYFAky1K2cwD24R1/QGr0tiQ0+ocXizwb+vN8Yd1BgHBfpLsogx53EdUaCICocICKuOK4YRuxPmW71Y8E1fL73SrXuVAhuqw163Zpt0pc1tzjVoa9bxi4jQvqJqgjTQJgkzBjhRNSSRGnS6ApujcekmYK7UWKZ2TddgmpAbAZ4yt2/c5QCkBhEJUQDXj6g6pFmDQ8tsYq0GZqYZdYueTchM6wdK/mWJftSTRt/e7RDHlTGEsNmaGT22sElAg1ADLgNdMULdiiKqsAMMXFS8qd0DgsXBrr35QeL+iE5WkKZk/tyowWL1gVsZ1xvgkPqXi5lZaMDpoWTX8cIuox299+GuPEvBxvhBAtkRUgTr10HOsfP7iunmuMbAw4ZZmvO7M7/ACbG6BE=
|
||||
eNrVVk1vG0UY5uPGkV8wWiEhIa+9ttfr2iiHKFRtoVGLaqqitlqNZ1/vDtmd2c7MxnEjHyg9Iy2/oCVRUkUtRQVxgUocOfAHwoHfwjtrO6VO2lThhGXL9vs1z/v1zN7b3wSluRRvP+bCgKLM4B/93b19BXcK0Ob+XgYmkdHOhfODnULxww8SY3LdbzRozus64yapp1TELKFc1JnMGlyM5O5QRpPf9xOgEYa/f/CFBuWuxiBM+bO1rvzcfNLw6s160+8+XWUMcuOeF0xGXMTlk/guz2skglFKDezN1OWPNM9TzqjF2PhKS3GwJoWACnN5sAGQuzTlm/BIgc4xDfhmTxtqCn1vF+PCn3/sZ6A1jeH7K58twP391vu/2fBauxjMKJm6q2kqx+56lbcuH360+wliKJ8PkqJGvICsU0VaXqtDmkG/3e77XXJhffDriTGuKB5zUT74acAzTGtJ+mSNsgQWLuWzvBhidjWS0S0XQa4E3v5qatxrm6w8rCftFafv+23nY9SvtDq9lud5taTttnonKJ4vwTm/lUsN7sVZzpjTyTm/0O9ep2pS7s2g/mCtsHnuZRCxScqdoNv6ZSnAOoLGDqPO83auc1oeYGdJLGWcwtMbrrWuYHDsTfnQ27uqaJzR8pGQLrNleHbDxTLTSMbuAMcQ3EtReUg6TW/YG3UpbfqtJgPai+g51qI96nm9YdRrHZIT81hTECFeTlNd7hpVwONFBoNJDsfnaH8BbN7kJvmUCtLsdT3ief3qbZtcjfXXOFMKm/nXOw+2nfn2OH3Hq/fqncCpOVzgzAkGIY5urJ3+tjNM5TDURmLGEIKgwxQip29h1ZZ1WGzAYNfaGCjCcmgwIWzRLE9Bh1mRGp5TZZaDnG6Bq4fLbSCkPMS9VpNlA6nikCmoShJGXM+VI6wganM6ybB6y045Zi8FTUP01se9dPtVWWugiiXHpIkch8akYcEXImNHITQcVBgVao6OTqqyplLEdtvR38dVsO7KzAVNf1pzxlJt6NwG0EzmYFGGXGxyA/oI411tohBpK8fu206+jAmDDKlBpNhvJEM0FCMe28MLDS9VO8olEuhRJoymEBZ5eEfzu4gftygGhbC8CuhCK0yCJY90mCI9oHMzWCgjORahgCw3kxfePmptuIV1FetIEA4nVWItr9dtdlredPreq1l85TQWxw9KdUOlWQM76OYKa2Qalo21+R/R+7enkftZiHs3eu2FEFiuODN1P2ZzqjInUtV/ZvZjZH7O959UFOyy+U10RMpvSq+vuwz2cDiQJsv9YpMzqcTy5fAyqb57eduZzV6YUJ0gF3Y832/RUbPdhqDZ6QbQ9TvtgAWdTsBgBM0RZThijEZeuzXy293uMPCY3w4gYBEbBoBMmlHBRzi3dnE5rvZN52jYUTsbbY2/UGLwaw2/rlbCAW6gnVDnds1JGa4cMhJ2BVFhqRBxwZDf0GNjTNWM6+cTiL9vvtFZFwsEtz5zOuuZs6CnJTe3qjlnPcYsPPrOl7IgVAHBS5IibdoLz5CRVKRiGxxVlwo9BttRgpfYhq4T5AhiEkArO0JWkXPAoSFyRBRg8wGJm1Szv2WIkWQWofJZRK2TSyMywbMjKT40ZEPIcaWfmdbIV4U2RNMJCqlZMlwgUABEg90Aezg+avGsyDBCRCzB/CucxcK4hvot8fn8/D7ZXkCZkltibQYWpXPYVrhaOferB4G8MOEmVdzeKHYinIW37f/MxZZ/UdgQK5jhVPSdkTtbB2eKr9tvHGo6ffEsgDa3p/8A34xtTA==
|
||||
@@ -1 +1 @@
|
||||
eNptU31QFGUYP7TMCRoZnWkmamA7hj8y9thlz+Ug+8BDTAyOuAMVMXlv973b5fZ21913CVJHjtLGUpu1j5mmsJLjLk8SL3EcE5uIGG1qJE0swhz+0JE0p8bJyGaQ3juBYHT/evf5+D3P8/s9T2u0EWq6qMgpnaKMoAY4hH90szWqwY0G1NFrkSBEgsKHK11uT7uhiUM5AkKqXpSXB1TRBmQkaIoqcjZOCeY10nlBqOvAD/WwV+Gbh97dZA2Cpg1ICUBZtxYRNJVvzyWsU0HYsm6TVVMkiF9WQ4eaFXs5BXcio4TJAyWJCEICEA0YggBexUCEFwJNt25ZnwBSeCglAjkJGDwkGVIAYsAg83EdiqEKEnAIBlU8GDK0RBXKRm2JChDweOyLlvSwoOjIjN81ShfgOKgiEsqcwouy3/zM/4qo5hI89EkAwRjuUYZJrsxYAEKVBJLYCCN3ssxDQFUlkQMJf16DrsidkzORqFmFd7tjidFJzIiMzKPFU33kVTZj5mXcMsPaqENNpI6AKEuYO1ICuKWImvQfn+lQARfAOOSkqmbkTvLBmTGKbnaUA87lngUJNE4wO4AWZO2HZ9o1Q0ZiEJpRZ+Xd5Sad/5djbDRtK4jPAtabZc7s8AFJh/FpkqdTYlgrhqRYkqKPzoKGSGsmOQVXMD+hDk4RKEHZjwSznbYXfqpBXcXLCl+N4DRk6K1hLBb8/lR0cr/2uVZNSb0zXIJlM094BCOXoAsIF4eIxJIQtL2IyS+y08SKck+nc7KI554qxT0akHUfVmr51FZEOcGQA5CPOe+5D7HJIyJF3uzB7w0UXRqQ/QXLlWoBVi8p4dwVqwur1wjBI00kJykGTyJ8gZBMDtuEzCGCc3gdPtpBF7KQKXSwNM+ydi/DUpwdMA6Gge2NIjBjtI0m/Iril2CXs5R0Ak6ApDtJiRktWVtRXL7S2bmGrFK8CtJJD/CbYVmRYcQNNayCGUuWxnutwQhOrypea3Y7eDvl8/FLHLCwgPGyheQyvC5T9EyPH04cRfLSQ1gCDZv6U4ysN+dbkt/cF15cFRh+Lv32k28f+8f66LMPzlu/rMxdd6q3+/XHrnrZX8yX2wd37nv/yIGMiTFrR2jvvC+vpOn9o/FvalwnLzdMtGX9PbCh5vpP1984fy06/tburIn6EIvssecH2+an/p6zq8W9q2H3Q6lSv/vrhQcufR695cusk25sPlG3tVZ+b13uoYWst+/GlZvZZ0bHRttKdzx1VYyMjGyrW9B1Nv3c2T7i2NI/Qx/1WV5q8i3qUh+pXlzdPqaODHiOdd73sNAycvqvvVWLhn4dvfgtjNT3Dj29ee617GYqVFbbeqE3p9byQIY74+Pv0syBPadN1x/B1J9XrjAyfyvOP7dnyGlX5+34MKObvmVvCRwufYYc2ZFW27r98px4Zjmz4AnWfn77xoKT3tC/++vlUO2NHwdTe8YXV5Z/wbkqtsVLvFu7LzC3s9+5mL9x+RqtYv9XS7dEeryXbMNnwo/X5K7+YE7O8MQPjfrx8fstlomJuZabLY6ysRSL5T/v1GTL
|
||||
eNptVG1MFFcUXZUooj/aWpM2mjputZbK7M7sLAtLo5UuYlBZiCxksa307czbnZHZeePMGwTFJkLVVtRkaGPSxqrIsttuQaTYGENpsZZW0baoxEg3MTZ+tRqjqfUj2kgfCCrR+TXz7r3n3HvOfVMTq4CaLiFlTLOkYKgBHpMP3ayJaXCVAXX8YTQMsYiESGFBka/R0KT+2SLGqp5ltwNVsgEFixpSJd7Go7C9grWHoa6DENQjASRU9dettYZBZRlG5VDRrVks43CmWUdSrFnvrLVqSIbWLKuhQ82aZuURaULB5MAHZZkKQwpQK0kxBQLIwFQAAk23rnuPYCAByiSNl4EhQJqjRSCVG7SDEDAck0GgMAyrZB5saASfsTHrYiIEAhn2rOX5iIh0bLY9NUAr4HmoYhoqPBIkJWS2hNZIaholwKAMMIyT9hQ4pJAZL4dQpYEsVcD2SlrHQFJkMheNpTAkrZpfeQt8ZYvyShZ6ow9BzX1AVWWJB4Pl9pU6UpqHp6VxlQqfDscHNaGJUAo2D2SPtGkvrCJ2KBRjc7ptzL4nqWVAOo6qQ/GOJwMq4MsJDj1stRl9WLz3yRykm035gC8oGgUJNF40m4AWdjlHTakZyuCgZsxT+DTdcPAxHWdjHbbMtlHAepXCm01BIOuw7ZEHj0rixEiOZlw0wx4YBQ2xVkXziDCYDczeEQFlqISwaDaynPtLDeoq2WBYGyVl2NBrIsRLePxIbHjt9hQsGdmELZEc4qrZ6RONNIpxUflAowhxOsW6sjguK52lFuX7mj3DJL5nutTm04CiB4lTC0eWJsaLhlIOhbjnmesSH75ZtCSY35H3MoZd5k33ZLCOVeWoeKVRIvBVyFfqCR18rAvSQkCR1gzRDtb1z+LcLi5d4AI0DAQF2unOzKDdbgdLBxyOTMGZyWY4BVdjhQTMOGtjqRBCIRm28kGaB7wI6YfSmLGcUm92fp6n2U8vQwGEddoHQmZEQQqMFkGNuGHGeRkZAll/DUY9ufSy7FJzv5vlOScrpLNAcHOBTAf9NlmbEZkeyRAZvDtDv4H1xAqNHHWP2TyjLtky9IwTzOy6xILJGwY2x5fYvDeUlNTv0/79JvXTefv3NTm7l4YuHO/jj35xYhY7c6DzTGLD9p1J986e+qR3au3h8Zuq5asdqwsO35yy6djfV28+OHU+NLejbp3T3+B99dc3evqnT5y79IXLG4+6hfRm/5/UNqtLbEzddjrxbUaD/er9ew/CnS2r/Q1TSwLdlxK3jD0n4W1b06F5s5dD5kbt0ppdJ6fg+vfrT06/21X2X5K3smnS7+rO4lJ514xzXdePfl4/se9U0tkdf8HFiyIoeGfFb19PvrJ1fteE3hePrE/xv5zck3x59yuTLzx3PokHydX1lSnV1XmJrhW5V3q2zEntzQMTsg+9nuMoBcknuiceL9TOnR8/v/7HBb3tO2bidukitXHnJf9b14P+vqLlY09fkagD73YeStxJae8585qj9KVpsypatv90d4Jwr/gjGhye/XPKsX8+u7R5/S97WhYX7Gg7mCgRt97q+yMnt1a937r7g4sfb7m/K+/ED292XLt2e5rFMjAwzpJ7fZKTG2ux/A/oUIK0
|
||||
@@ -1 +1 @@
|
||||
eNptVWtsFFUULgKRRxQwBDBRGDZFA3S2Mzuzr9aGlC2Fhpbdbtc+VFzv3rnTne68Oo/ttgQTSklDJMqg6A8CEbrsmlJasKQ8Skl4Ki9REyGVBCT8wMRQiCJqYoJ3t1tpA/Njd+45537n9Z0z7ek40nRBkSf1CLKBNAANfNCt9rSGmk2kGx0pCRlRhUsG/DWhLlMThpdFDUPViwoLgSrYFRXJQLBDRSqM04UwCoxC/K6KKAuTjChc6/CODTYJ6TpoRLqtiHh3gw0q2JVs4IMtIMAYAQgNyJwiEbIpRZBGgIgSRwRlKyBsmiKijJ2pI822cT2WSAqHxIyoUTVIxu4kDVOLKBlbGUtp/K8bGgISPvBA1BEWGEhScWrYMINF2b0b01EEOJz4rbzZyaiiG1bvxGT6AIQI4yMZKpwgN1oHG9sEtYDgEC8CA3XjDGSULZXVHUNIJYEoxFFq9JZ1CKiqKECQ0Rc26Yrck8uYNFpV9Ky6O5MdiesjG9YRPw6itKIw0IqrLhO0nfXYqUMJUjeAIIu4jKQIcDwpNasfHK9QAYxhEDLXUSs1erl3vI2iW/urAPTXTIAEGoxa+4Emudj+8XLNlA1BQlbaF3jWXU751B1jp2m7+/AEYL1Vhtb+bCOOTriMDK2VhArGsPZSKagoMQFZw7+Hw5APR6QSak1NC0x41KYQDJaL5jo60rzK7a5/21mmu1avCTaE9ZC2RoiEhXJI0m6Hl3Y7nU6GpO2UnbbTZJlpDzc3JzTv6urmGBUWK1iptbKeK21YHXDViuVyU0RJeISVjoYmMYDiPBRDdU3hJmRnlHq5CjR7fNWBYHkd1SB7E4kIkKp9Pt4vlhYTODozLnAlbC2jBCTQHF9Z2Rby046YFK+3hyWxzuEzlNg7ZZWMYW/zMWtCTQ3jwqNwhFQuQhfFeqjM0zvGDRHJjUbU6qJZ11ca0lU8P2hzCpfMMPX2JOYhuvJtOjdI+/xrn1J4XrIMc9IaCkXNAoJ2E35oEA7KwRI0W8Q4ihgnsboq1OPLuQk9l4KHQ3gEdR7TcNUY5dMwasoxxHX7nkv2oQzZcScz4eMpJVFCVXRE5qKyeurJ4OgGISvK+kcni1S0RiALbVm31lCW9S1tiRYOmhwXjbdIlLeNZYQIMiF/JHdF1ZSMGxwQKelWF+Nw9OY0Y7zrxrlSJE2RFH0iQeIxR6IgCbie2d/cGtOtpBMX+9izBrhfCC+8NJvtBnVqvIWGJEzYjO+nMKzX6z35fKMxKAabeN2uExOtdDQ+Gtoh6ceeNchB7KP0nsSYNSlw1nA+PoQB74q4OMR4XQxDcR7ew7p5wHo5ikFOnoGe43j1CRCjZJqpKppB6gjinW20WsMFEkhkdkwJQzsZF860mBBkKJocqjEjZUomB72YUDUkKoDr85WTPgCjiKzJ8s9KlzWsK62q8A3Uk+OJRPrV0e9FWlZ0WeD5VA3ScGOsbigqJoeXpYZSGCtY2mAd8XAsxfMR2gsgz0LoIlfiNTSG9j/tkplNmwYijj0Orf4oU2IrYlnGVkxIoMTjwm3KflU2pTK5yo3nJ4UWfTQtL/tMFrdflc9QszsetM64fG/KHGLV1YOd7T9rv+4Nbule5vhuxzHz5IVb3y86N1g3fVLw9PnOf97q7x+cueDusntL7349N3/5ga7Bb87zA6/evvZF38xPZo38JhzfffXA7XsXH6NzZy9sOjBru2G8lF9bO3kp+2LB69Pa33Du+Vi+K+zIr5i57Eqpe+BMkF2YP+cnecsP1MIlu/vLlpz+47OhqdvogdjtVNfFO7vS4oKFO2ecnD/14aNHW+mRgHvG/FvL/Wdmz7oW2LeYku7vEhZ56m+8z56vPGimH/g/Zeddv3Op5d9k387qU1dem/ZJquLPjs+PGouox7fk0JPlFw+evRQs/3t6avq2Nzs9H54Cs+LbtneMXKxILCm4vKnjx1Mts+1bChavKO4sVfovWzdsL99cWvNecsr6V/5asefLS9cv
zf2lp+b+1ptrRzavwOV78mRynvbBw17wQl7ef6XuMgw=
|
||||
eNptVWtsG1UWdgiPwv6gCIraCi2uQQiBrzMPj2OnhG5qO2lIY8eJQxJKa13fuc5MPK/Mw45TWtqEh4BdYKBQIfEojWO3aUiBPiClRS2IR0XDQ4BEgIVKkP2xoouAVVeobLvXjrNN1M4Py3fOud/5zjnfOTNUzGDdEFWlalxUTKxDZJKDYQ8VddxvYcN8oCBjU1D5fFu0Iz5i6eL0bYJpakZdTQ3URI+qYQWKHqTKNRm6BgnQrCH/NQmXYfJJlc9Nb9nokrFhwF5suOrWbXQhlURSTFedq01EaSd06lDhVdmpWHIS606YVDPYSbncLl2VMPGyDKy7Nq13u2SVxxJ50auZgPVwwLT0pEr8DFPHUHbVpaBkYLfLxLJGMiFWcpvyBDYVBQx5kuZ3jsV5QTVMe2Ih9b0QIUwwsYJUXlR67Vd6B0XN7eRxSoImHiOEFVwujD2WxlgDUBIzuDB7y34VapokIliy1/QZqjJeSRCYOQ1faB4r5QNINRTT3h8lJBqaa9pypMaKk/b4aA/96gAwTCgqEikakCDhU9DK9rfmGzSI0gQEVPpnF2YvT8z3UQ17tBWiaMcCSKgjwR6Fuuzz7pv/XrcUU5SxXQy2XRiuYjwfjvXQjMf/2gJgI6cge7TchjcWXMamngNIJRj2y1QBqWpaxPbXVVckEiiVSMr1uSa9ayDV2Ki0NGodGTEbEvmYv39t1t8Y6+xrEhNJXbU6E1yK03oAXeulmVq/n+EA7aE8JGcQFBCTs9ZmuTZ6dbOCukQ23S92rY2kI81MPBxjUYCSWjV4VzQQiHdFlPAaHNYCAR0pmcZ2IxmDuL2pvd3MWg2xOO5INw/0qVysi7mnAfnboi0xmNFoabA76K3lGWOlk1C2MiJfH0nTob54MsP51nQKbISPRsI9kTDfz69uvyeawd5of6Ij0Z28m2lG8zj7aR+gKrR9lNdPlZ6JOcVIWOk1BXuEZvy7dGxoZIbwcIEU0rSMoTxRJz7xYbEyTDujLeeFfX0+RJRqH4kLlttJ+ZytUHcyFMM5aV8dy9Z5fc6m1vh4sBImflFhvhYng2ikiDjDc4NQRIKlpDE/FrzoCBwpjQDpb4k+GVaABzTVwKDCyh7vBu2zWwQ0h/bNzhtQ9V6oiIPlsPaR8ixkBweyPLJ4XshkZSow6GXFJLZQan/liqarpTCEEJANe4ShvBMVy5wax0iuFKApQNGHBgCZfSyJskjqWf6trDLDznOk2G9e6GCqaUyWXtFb7gb19nwPHctExqXY52G8gUDg8MWd5qBY4hKo5Q4t9DLwfDY0IxtvXuhQgdhJGeMDc95A5O3pm8khkfKxqJZJJiFLQ4ZPMhxXy9DkmPLXYl+AqZ0k21BEBKXUTE3VTWBgRPa2mbOn3TIcKG2eepbmWB/JdKVTVJBk8bjDSobUUg5E4JqOJRXye1EKIIgEDGb1ZxdDPZGG1ubgwW4wX0ggqs1+M4qKaihiKlXowDppjD2GJNXiyQrVcSHYCNobeuz9ARqxXjoJ/QHM+XmaBqvJcppD+7/s8qX9W4QS4Z5B9j6BrXfVeb2sa6VThvV+H2lT+cuytVDKVel9r2rLjY8tcpSf6r8+9ZEyTC0O/+e+B2e4Ry6ZqsbTu8ZeP4VinUv0n6q+XfrNgYfvvG7m+9uvvSrUOqp0n3hu4yT7QzC06JmhiStnlgQ3rOMmP379zKkhfPUTv4LJv292nz4arspG90xNPfR+A8zsPfpLEzdTOHhV2/JN73xbFVw0cbzvxfwed8t2sOtvjiX86HvHvzTpY++fOsE/m38s0rPs+LHPudzji1b8+OlmUoSTT08cd5/d/cXJ+tu3Tpy5ZfUu9rb4rb9tGOSXi3f8RfEOKeiTLnRyh3DHn46ZI/GuVafvby/s//in+z44M7jiwL87/7UlNzK8pwmJLU9cs6LmVGiqZ03fC7vBP4TTO4Yhs2r6M/hw9dqVD13+3HLx2V8u23evg9n9h7BiG7th69Jt757uvim/
bKvg/fVn9/bDUdSy/uwHmx3PD4cXT1rOm1e9lBWMxX/+KiH/99Fvjn4xvHH7Dfq2k+8su/S68Vjx845zM9f88/CThy7tfUH6+fsXf1+6qdrhOHeu2jE0tXPH2Uscjv8BZV1LvA==
|
||||
@@ -1 +1 @@
|
||||
eNptVX1sE2UYHwMTBFRCAP9Q4Kya6dzb3bXXdt1YYOsGm6PrtnZjQ7Be7972br2v3Uc/BhjcFAXGx4EkSvwD99GOOcYWFj4FEghiAE1MJDhQZoSEqCAqJigxwbddJ1vg/uj1fZ7n/T1fv+e5tmQEKioniVP6OVGDCkVr6KAabUkFtuhQ1d5NCFBjJaa7xuP1dekKN5LLapqsFubnUzJnlmQoUpyZloT8CJFPs5SWj/7LPEzDdAckJj6yY61JgKpKhaBqKsTeWGuiJeRK1NDB5IM8jwkQo7BmKYxeAUnXsACkFNWUh5kUiYcpK12Fimn9GiQRJAbyKVFI1oDVbAOargSklK2IpAR6q5oCKQEdghSvQiTQoCCjxJBhCgs3O9YnWUgxKO1rWbO7WUnVjIHJqRygaBoifCjSEsOJIWN/qJWT8zAGBnlKg30ofhGmC2X0hSGUAcVzEZgYu2UMUrLMczSV0uc3q5LYn8kXaHEZPqruS2UHUHVEzRj2oCBKKvNr4qjmIkaYyQIzPhgDqkZxIo+KCHgKxZOQ0/rjExUyRYcRCMj000iMXR6YaCOpRo+boj3eSZCUQrNGD6UIdvLgRLmiixonQCPpqnnUXUb50J3VTBBmx9AkYDUu0kZPuhGHJ12GmhIHtIQwjE/xBC1JYQ4aI3/6/XTQHxCKq/2BBm5VoMJfWoeTDS7GAhuhF/eGI25fZXOpfWUTUxcrEPlIU3UJIBwWJ+Gw2exOQJhxM2EmAF7CN1aYWywVKxvL6BjL+jRLrduPO8h6rsEFNY1plFY1qERlgx6qIOLmKBFcUc+W15XE9Rp9Ob+8vNxTWtmimEsFV9RKlrfavQJZL4WKMBSdHuGY4lX0Knt5XdQmttjibp3x2kvCimyJyYS/0s6WUNW19AqIE1GHtWxieFabBeCZCO04WYCnnoFxbvBQDGms0UWQZK8CVRlND2xPoJJputrWjXgIL36ZzIxRp6fqIYXnd5chThonfKyehxEOzENrmAW3kBhBFlothTYcW+729bsybnyPpeCQT6FENYhoWD5O+STN6mIYMn2ux5L9RIrsqJOp8NGUAhiTJRWCTFRGfyOoG9sfoLLs4NhkAUkJUSLXmnZrnEizPtoaizK0zjBsJCrgzlbSygWgTgeHM1dkRUq5QQEBQTW6SCs5kNGM864P5YoDAgc4cSwG0JhDnhM4VM/0b2aJqUa3DRX7yKMGGto6aN0lyXQ38JMTLRQoIMKmfD+EIZ1O5+ePNxqHsiITp8N+bLKVCidGQ1gE9cijBhmITlztj41bA44xRl5CB78tYLMU4EwAWi023A6DpA1xK2CF0BZwWgnoPIpWH0cjlFQzZUnRgApptLG1uDGSJ1Cx1I4pthI2qx1lWoRxIs3rDPTqgTIplYNahMkK5CWKOeBaBlwUzULgTfPPSJahSXNXug41golEAh557GuRFCVV5ILBhBcqqDFGH81LOoOWpQITCKuupMkYLmBIAvkNEiiFgiBkQClaQ+No/9OuO7VpkxSPYo/QxkHWWmwqJEmrqQgTqOICO2pT+pvyTiKVqxg6O2Xtoi3Ts9LPVL52qPoMPvf7u/Oeyc8Dzdk9nTNzS2dN3zRlWumG3Mvxre4Fp368l+3rfbBt58ZDH5gX/rbu5vG70ey9qwe+2NtxKXD+/L2cy44793YlO0avn17zw3u3Ni9smWNeJAqtna1b24ISe2OlzvT2L97cNZhwl4HvjiS6buUteOr9ZAjU1m07aczb/df27RvX77vecb+dfC7yetXX2GfE4pysrNHbI8ws59v7iFmtpyo2XTy21HZtdXaNsa79la9uNWqzL13dubHtLZZecuXlG3OffiLY++KZnF+m80/+8/ybp6uOLO0KL5u9pWrw/tSra/HlH7avufDB3T2699Vvr0X/bt71jbe2fE/LOen3ozO33SY7gzPcC+f3+S4MzNlwObLlvqe4vjz3tY9nAJzc+PMfV6qFy6MveArrKoZHfxrpPXxO
2NPF3oyHquDw0EfS7nYmeeWB415T/69Ha/5VKwtzzlbNz0uqz87oqNdu3xn2DuxchKr84MHUrP07pi35JDsr6z8PST+u
|
||||
eNptVQtsU+cVDgLRVlPaTBQQQms8066Q5rfv9fUzWZSmdpI6iZM0NiGhm9Lf//3te+P7yn04thmtgE6jQCXuKgErj6rE2DRNwrOUhoQ92CrYoy+1SEm39N2qXdVpQ5Xo6Ep/O85IBFfy495z/u9855zvnLs1n8SqxsvSohFe0rEKkU5uNHNrXsUDBtb0J3Mi1jmZzXZ2hCNDhspPVXG6rmg1djtUeJusYAnyNiSL9iRtRxzU7eS/IuAiTDYqs+kpYZNVxJoG41iz1jy6yYpkEknSrTXWCBYEi4gt0NIvJ8hPVDZ0SxRDVbNWW1VZwMTH0LBq3fzzaqsos1ggD+KKDhibC+iGGpWJn6arGIrWmhgUNLw5z2HIkpRmyiqynKzp5thCmscgQpggYAnJLC/FzdF4hleqLSyOCVDHw4SchItFMIcTGCsACnwS52ZPmcehogg8ggW7vV+TpZFSMkBPK/hm83CBPSCZS7p5uoOQaAjaO9OknpKFtrlpG308BTQd8pJACgQESPjklKL93HyDAlGCgIBSr8zc7OGx+T6yZh4JQdQRXgAJVcSZR6Aqup2n5j9XDUnnRWzm/Z03hysZb4RjbLTD5j2xAFhLS8g8Uiz6ywsOY11NAyQTDPN5KodkOcFjc3rRbX19KNYXFeuaMt0u/2BHu7Yh7WlsNxLQL6EmIZUJtjUqjXFbWKCNJNVi9FJcCNAeJ+3weL2MC9A2ykZyBkGjTac7m/tl0RH1BjemI2If1YnCzaFWpokRwrwt2Rps07yhxlhXl+EKYKWnRersiEn+9oZWFGn1C20q9vcEbL2phrTLkXxkfcLVNoD62nimoV8NRlUYb4O+zMZOd6C31kIoG0merUs12WwupTXp0CQUHkDKQxyfoR/ObGQTjXExHIjIA93hABfu7fI+Mo8zRbkBVaLtppxeqnCNzSlGwFJc58whmvIeVbGmkHnB23KkkLqhbc0SdeK/XsyXBudwR+sNYa/IBohSzckIZ1RbKLclBFWLg3K4LLS7hmFqXC5Lcygy4i+FidxSmCciKpS0GBFn49wg5BFnSAnMDvtvOQKThREg/S3QJ6MJcEqRNQxKrMyRHtA1uzFAMHBqdt6ArMahxGeKYc3J4iwMZlKDLDJYlksOipQv42T4KDZQ7HTpiKLKhTCEEBA1c8jh842VLHNqHCa5UoCmAEWPp4BKSiHwIk/qWfwurS3NzJLyU2dvdtDJpiELLu8sdoM6P99DxSKRcSH2DRinz+ebuLXTHBRDXHwe1/hCLw3PZ0M7RO3szQ4liMOUNpKa8wY8a07dS276fJjC0EVHfYybdUVdvhhiPZQbuukoZijswK+Q3ccjglJopiKrOtAwIjtaT5tT1SJMFTZPHUO7GDfJtNbCS0gwWBw2ogG5kINWa1FULMiQPYZiAEHEYTCrPzMf6G1vCAX9Z3rAfCGBDmX2/ZCXZE3iY7FcGKukMeYwEmSDJStUxTl/E+hq6DVP+2jEOGkcY1inj4l6WfAQWU5zaP+XXbawf/NQINyTyDzFMXXWGqeTsdZaRFjndZM2Fd8iW3KFXKX4nxaNV+68vax4LSaf69d3dYUSq+mKyX8d2/eFcNu7q5d91Mq3b9nzxuUjtidPXnzry5aXutdkp8Z/fPW3K0frL+fKPz58YdOVQzO/mLi77A/9J5Y8v4t9Z6nnfF91d1/lXy69bH+xo/Lcu5V3TTx+deK7787uF+pH/rj2Ae7KD5Y/F/HsmH5/N/hm8ai15dUvaybPHXztqxU71ZllgNd7Ly+5Z6/nCj044P/oku6uryrv/WWw+oMXyspSn7934M3Et2v2UKsO7l4R/nX5jk9euf3BgLrqh4779vdkVgxtiXy8anPlJvFQw6PlQoV77RrB+megL0ruCd11f7Mwc6Guauq+JduOP3PHvrVM7dF1W39/9av/bNtu7GWlNwZf+9G//+c7MfSTwHR2zROvJv4pOtYHfnPxM/2pddsP/WNlWf21S/qGv+2o
eKH8Z7T4ZvwCnz6+7IJtqZo88Oxj0c+XTvz0qbG/Vz1dPdq68pnlLVXbMp13JnYfPKM33L2y7vrqT5+Y+dX5UXkmWl/RAh97e/32o6PjUmrtzn32r186eWXXtd9Z4bfi8m6B/rSOG8Gf3em59y0tKrw4/d+laPuZ+uzjr1/70F5s0OIyZnDa3Uy69T3LDll/
|
||||
@@ -1 +1 @@
|
||||
eNptVX1sE2UYHyAIxOAiqAn+sUvBRHDX3vWuXbs5ktExQDa6jwIrhpS3773XXntfu4+uHRKyOUgIDDgJMVFAga7FOccmhI8pKhINGBMSwI8ZIcAfEgNBEREVyXzbdbIF7o/23ud53t/z9Xue68gmkKYLijyhV5ANpAFo4INudWQ11GIi3ejMSMiIKly63t8UOGBqwtD8qGGoernDAVTBrqhIBoIdKpIjQTtgFBgO/K6KKA+TDitcaujNdTYJ6TqIIN1WTry2zgYV7Eo28MFWL8A4AQgNyJwiEbIphZFGgLCSQARlKyVsmiKinJ2pI822fg2WSAqHxJwoohokY3eRhqmFlZytjKU0/tcNDQEJH3gg6ggLDCSpODVsmMOi7NT6bBQBDid+uag4HVV0w+obn8whACHC+EiGCifIEevDSJuglhIc4kVgoB6cgYzypbJ64gipJBCFBMqM3LL6gaqKAgQ5vSOmK3JvIWPSSKnoUXVPLjsS10c2rCN+HETVUkd9ClddJmg767FT/UlSN4Agi7iMpAhwPBk1r/94rEIFMI5ByEJHrczI5b6xNopuddcB6G8aBwk0GLW6gSa52cNj5ZopG4KErKyv/lF3BeVDd4ydpu1lA+OA9ZQMre58I46Nu4wMLUVCBWNY+6gMVJS4gKyh30MhyIfCUiW1pKkVJj1qLAAba0RzOR1uWVRW1rzCVa27Fy9pDIb0gLZECIeEGkjSZU4vXeZyuRiStlN22k6T1aY91NKS1LyLG1riVEhcykqp2mauKri43r1SrJFjYSXpERY6gzGxHiV4KAZWxUIxZGeUZrkOtHh8DfWNNauooOxNJsNAavD5eL9YVUHg6MyEwFWyKxmlXgItiYW1bQE/7YxLiWZ7SBJXOX2GEl9dXcsY9jYfsyQQC44Jj8IRUoUI3RTroXJP3yg3RCRHjKh1gGbdBzWkq3h+0BsZXDLD1DvSmIfomzPZwiDt9y97SOHn0tWYk9bJQNQsJegywg8Nwkk5WYJmyxlnOcMSi+sCvb6Cm8BjKTgQwCOo85iGi0Ypn4VRU44jrsf3WLKfzJEddzIXPp5SEiVVRUdkISqrt5lsHNkg5NLqwyOTRSpaBMhCW96tdTLP+ta2ZCsHTY6LJlolytvGMkIYmZA/UriiakrODQ6IlHTrgMdL9xU0o7zrwblSJE2RFD2YJPGYI1GQBFzP/G9hjelW2oWLffxRA9wvhBdels13g/p0rIWGJEzYnO+HMKzX6/3k8UajUAw28Za5B8db6WhsNLRT0o8/alCA2E/pvclRa1LgrKG5+BDyeHBbEcWHUZgHTJhy8U6Ogy7K5QE8T1HMCbz6BIhRcs1UFc0gdQTxzjZS1lCpBJK5HVPJ0C7GjTOtIAQZiiaHmsxwtZLLQa8gVA2JCuAO+WpIH4BRRDbl+Wdlq4PLq+qW+o42k2OJRPrVke9FVlZ0WeD5TBPScGOsHigqJoeXpYYyGKuxKmgd8XAsxfMgzEIXw0LoJhfiNTSK9j/t0rlNmwUijj0BrcNRptJWzrKMrYKQQKXHjduU/6q0Z3K5ypEvJzSUbJlalH8miTto/2mqeOO9f6ff+DkdW+hbdnTG1M7ptU9N3XfG7Nt8aqBrvTV9xXCFNvvH9xK3f1vwWeuv8vP0lI86Otqjns3f9XB/9/8z+ODqpUtiy7vkMceuttWV117mh+7emXjtCnXj6Vl/XH976oXVG+d2LVh7dcfMTReNvZfSd145F6mM1m2c/0Ns2dlv5/oP6vGdtRdKS3Z3WRMrZ90kw1sH1gxv2jb4BTlLnD08j2w7F++89tdmWq2ZPPn4vfMzv3/i9rTpB6aZoYp3trefrrueeilT0T1l/Ybd6VppT/B+e8ndrovny7p9t7pmvL9368a9t+Z8dfl+1+CGF0+VX1g9/+zr+2e0/3Kz+MnsC9uufH6meNUc73ZH565nn3nr6+0zJnZsmfLBTn12l7uiJDXvWv+ei3eDr7Jz24f/7JROWIM1
kT03/T+FtJIHE4uKhocnFT0oPrUW4Pf/ANGuMu4=
|
||||
eNptVXtsU2UUL6AZQWVoFBON8VogGbjb3UfbtcUZt3Udc6zd1sKYaJqv3/3a3vW+dh/tHszgUOKDMa4CYkRA2FqzjDkdkzeKBsWIkaB/OCCQaGKEGaPxFcPLr10nW+D+cXPvPef7nd8553fO7c4kkarxsjRjkJd0pAKo4xfN7M6oqNVAmv5iWkR6XOb66gPB0B5D5ceWxHVd0TwlJUDhbbKCJMDboCyWJOkSGAd6CX5WBJSD6YvIXPvY2k6riDQNxJBm9azutEIZR5J0q8daz8MEAQgVSJwsEpIhRpBKgIicRARlLbaqsoCwl6Eh1dr1XLFVlDkk4A8xRSdZm4PUDTUiYz9NVxEQrZ4oEDRUbNWRqOBMsBWfpmxUVyaOAIfTvGCZ1xeXNd0cmk79fQAhwphIgjLHSzFzb6yDV4oJDkUFoKMBTFhCucKYAwmEFBIIfBKlJ06Zw0BRBB6CrL2kRZOlwXyCpN6uoFvNA9l8SFwNSTf3BTCJ8pqS+nZcY4mgbU7aRg+3kZoOeEnARSMFgPmklZz98FSDAmACg5D5/pnpicNDU31kzeyvAzAQnAYJVBg3+4EqOu0jU7+rhqTzIjIzlfW3hssbb4ZjbTRjc30wDVhrl6DZn2vD/mmHka62k1DGGOa7VBrKcoJH5tkZBeEwjIYjYll7tdrUFvX5pFqfEkzyKS/PNbhal6dcvoYVLdV8OKLKxoqwI+pQmkm61E4zpS4X4yBpG2XDOZOVcci0G8tTjnq6okaCTTybaOWblvsT/homVNXAQjcl1Cng6YDbHWryS1XLUJXidqtQSvoatUgDQI3VjY16yihvCKFgoqatRXY0NDHPlENXfaC2ASQVWuhYVWkv5RhtKYEpG0meK/MnaG9LKJJ0OJetiLN+LuCvavZXca1cReMzgSSyB1rDwfCqyEqmBk7h7KKdJJWn7aTsLip7DU0qRkBSTI+be2jG9Z6KNAXPEFqXxoXUDa27D6sTnTqZyQ/T7kDtTWHP7/NipZpHQ3GjmKCcRB1QCYZiHATt9LCsx+4kqutCg5X5MKHbCvODEB5ELYrFWTU5CBkYN6QE4gYqbzsCR7MjgPubpY+HlURtiqwhMs/KHFxFNk5sEbLGOzIxb6SsxoDEd+TCmkdzs5DqaEtx0OC4eDIlUu4OO8tHkAGj+/JHFFXOhsGESFHDxXExQ3nLpBoHcK4USVMkRR9qI/HsI4EXeVzP3D2/yjSzz4GLfeBWB11OILz0MvZcN6hjUz1UJGIZZ2PfhLG73e4jt3eahGKxi7vUeWi6l4amsqEZUTtwq0MeYjelDbZNepM8Z44txC9hdykHWQcCkYg9iqIMa3c5KQo5aQTcTpaNMgfxNuQhRsk2U5FVndQQxHtbbzfHikXQlt08ZSztYPExainBS1AwOBQ0Il45mwMWuKIiQQbc+zBKQgDjiJzQn5nxNvvL62oqP1pFThUSGVAm/hkZSdYkPhpNB5GKG2MOQEE2OLxCVZSu9JGN5c3mPjcNWTsdKYWcq9TF0TRZgZfTJNr/suvL7t8MEDD3JDRH4myZ1WO3s9alhAjKXE7cptyf5YV0NlcpdmJG96OvzbbkrlkbXg/Xfkbdf+LilcUVOw6D8eubjy0uerWQ8Xq9T289b7zF3/f66nc2dKVqzu19xP9bf+E/qW8uHfwn+uC9Ff273O+u+XpT08qeoZFfLl1fYfvwzROf9ZwtenIk3PvN/tR58a8HetZ9Wlz6ye+7HtKCzXcW9Uh06pN53Qlm58JLT3x1pnnWkgVP3dnMt+ruHRuHk7XsoWVn+MyzD395/NvtR6penPdh4thje+YP97sPvly4Zc6R2KLTV7/3zjZ8L81BF2p7ly/o6TzZ4ztd94v+5B1zV47GHts2tOXy5Ss/7ty8eC9be/F32LVo9M/xT2f84Sto6v38zPpfF1RcePuVy29s+s4TbC4+vWbt3PVf9o3uuTa3c1vy7nsYx+lTP7DRlyxc47/HK4pia+7Z3fT3
OTi6KWrZffzkxyPb/5C+2Hroam9XoavgNWLO+FPLnvfIF38inhj++OzGx6WZwcJfg2pRffddsXnDkdGWQODa/ur3Xt3y3SLXOKfd+OnUlZ8LLJYbN2ZZNj9EJK7PtFj+A1XHTc0=
|
||||
@@ -1 +1 @@
|
||||
eNrtVVFv3EQQVsUfWa2QkND5zufz3SUmrRRFUVuVEFACUhUia8+es7exd93ddS7X6B4IfeXB/AIgUVJFLfCAeIFKPPLAHwjv/A9m7bsEQlCrPtfSyXczOzPfzH7z3dHZPijNpbj1nAsDikUGf+ivj84UPC5Bm6enOZhUxsd317ePS8Uv3k2NKXTQ6bCCt3XOTdrOmEiilHHRjmTe4WIsT0Yynv52lgKLMf3T8081KGc1AWGqn+zpOs4pph233W13vaUfVqMICuOsi0jGXCTVi+QJL1okhnHGDJw27upHVhQZj5jF2HmkpThfk0JAjbk63wMoHJbxfXimQBfYBnx5qg0zpT46wbzwx+9nOWjNEvhu88EC3FfHn3FWnSMMkkiZZPA8kjgJYRwzLeC/JU+xFZxZdVbu80gq8bPFprWDSIySmbPBDmyn1fHAdX+95lvNMjlxNuqB6urb979fm5f6EERi0urYX+r/cmPMpuIJF9U3F+RG95qCGPNwlunqxKgSTmIcW/VyOy1bpDskm5Ehnuv5pOsHPS/o9cndje0XEYtScKImU/VMSKe2nK1mxtnaj6qLdtq7TQPf79EPSM5ue/1lz3XdVtpzvOUbHC+vYVs/KKQG514zaOz35nlc+WvafIF3ppADf97665DO2UkD6raH7YFPWxRvA/BqQzgouKrvJTQ8BxqIMstadMRMlIYYj+QNsbcxT2hwSEuMyMvM8IIpE4KIC4mEp4EdVovqiGUQlkX4WPMnEGL9JAFFg65t98orTKoQrA4zjgRG92DhjOVEhALywkyvon302nSL03WuS0M4mhrQNPDc5WG377mzFuUC6SoiCJH1ibawcWVwKQ2EjIe4j2qK0Nkog3iBXKokjBBUPYeY67lzjEywfaVyEhqThSVfBBjcceyQgwrjcj6/mE3rapkUid0fTODXYFOpzNzQ9RGgBqZwutcwTKTa04VNqyNZQGgxcbHP6/YWSHqhNlLh7v07ejb7f6lZe5XU4AetuqOyvIOpnUJJvAEny1jOOlY4tHmrRG+mREvDt0p0qUTv7B3ShmthynSKajRY7jEvHvV9fwi9UX8pcgcuG7DI9Yb9pTgajrvMPj7AwOv6cdcbjsZDd4Q4u/2lQc9FHcuZ4GNkqF08jtuwQy/Jjd6Gyhq/ocXgaw1fH9fGbZQZy0W6i2IY4WbiUiNBEBUOEBGXES4aRuxNmGpUZM41/L7zWrXulQhuowl605pN0lc1Nz/Vom9axiwiArpz/6Ot7d2Vla2HW3fukIeyJEwBYYIwrbkVVkPGUpFaa3CHHCb0BOz9EsP0nm4T1AZiUsBTlg3WUXBAmhA5JgqQCoBKSOqlPDDESNJkqGMWWdvk/phMsXYsxXuG7Ak5qf3N0RZ5VGpDNJuikZlrBxcIFADRYPfRFs/ZAc/LHDPExArLP9JZLBHX0F5Z6TRdfy4+mQMJyOEC0wzNaw1qtM7xW+NqnSUgO516dNT+AxWlCfeZ4lajLWHoIoulRxNqb2cx9xBHmiNpAjp2mm2hM3x2XzvVDP9W4IBhtvrM7uxvcr/ytw==
|
||||
eNrVVk1vG0UY5uPGkV8wWiEhIa+99vojNilSFKq2QGhRTNUqRKvZ3de70+zObGdm47iRD5SekZZf0JIorqKWooK4QCWOHPgD4cBv4Z21nVInbapwwrJl+/2a5/16Zu9OtkEqJvibjxjXIGmg8Y/67u5Ewu0clL53kIKORbh36WJ/L5fs6L1Y60z1ajWasapKmY6rCeVREFPGq4FIa4wPxL4vwtHvkxhoiOHvHX6pQNorEXBd/GysSz87G9Wcar1ab3aerAQBZNq+yAMRMh4Vj6M7LKuQEAYJ1XAwVRc/0ixLWEANxtotJfjhquAcSszF4RZAZtOEbcNDCSrDNOCbA6WpztXdfYwLf/4xSUEpGsH3Vz+dg/v7jXd/M+GVsjGYliKxV5JEDO21Mm9VPPhg/2PEUDzrx3mFOG2yRiVpOI0Wqbd7rttrdsiltf6vp8a4KlnEeHH/pz5LMa0F6eNVGsQwdymeZrmP2VVISndsBHmh7UxWEm2vbwfFUTV2L1i9ZtO1PkT9hUar23AcpxK7dqN7iuLZApyLO5lQYF+e5ow5nZ7zc/3+dSpHxcEU6g/GCptnfwY80nGx1+40flkIsIagscOoc5y964wWh9hZEgkRJfDkhm2sSxgMe1M8cA6uSRqltHjIhR2YMjy9YWOZaSgiu49jCPaVsDgi9S64IQ0dGviNptsMO92u67ZD120N/G7Q8Y/IqXmsSggRL6OJKva1zOHRPIP+KIOTczSZA5s1uU4+oRwP7zjEcXrl2zS5HOuvcaYkNvOvt+7vWrPtsXqWU+1WW22rYjGOM8cD8HB0I2X1di0/Eb6ntMCMwQNO/QRCq2dgVRZ1WGzAYOsuBgqxHAq0Bzs0zRJQXponmmVU6sUgZ1vg6uFya/Ao83Cv5WjRQMjICySUJfFCpmbKAVYQtRkdpVi9RacMsxecJh56q5Neyn1Z1gqoDOIT0lgMPa0TL2dzkTaj4GkG0gtzOUNHR2VZE8Ejs+3o38RVMO5SzwT15rhiDYXcUpkJoAKRgUHpMb7NNKhjjHeUDj2krQy7bzr5IiYM4lONSLHfSIZoyAcsMofnCl6odpgJJNDjTAKagJdn3m3F7iB+3KIIJMJySqBzLdcxljxUXoL0gM719lwZiiH3OKSZHj33bqLWhJtbl7GOBZ4/KhNrON1OvdVwxuN3Xs7iq2exOH5QqmoySWvYQTuTWCNtJwlNac1wstL/I5L/9iyKPw9974evvBbahjHOTeCPghlh6VMJ6z/z+wlKX+q0HpdEbAez++iYml+XZF91JRzgcCBZFpN8mwVC8sUr4kVqfXtr15pOoBdTFSMjtrsubYR+q9nsgOu3lgKn7dA2DZxGp7UUBp1BnZpXE6DdqDfDeqPjDzqOj2Wtt5baroN8mlLOBji3Zn0ZLviGdTzyqJ0OuMJfKNH4tYpf10phH/fQTKi1WbGSABcPeQm7gqiwVIg4D5Dl0GNrSOWU8WcTiL83XuusyzmCW5s6nffMadCzkptZVazzHqPnHj1r48rn6/3N5eX1m+sffURuipxQCQQvTopUai5BTQZCkpKBcHBtytUQTH8JXmxbqkqQMYiOAa3MQBlFxgBHiIgBkYCjAEjmpNyEHU20INMIpc88apVcGZARnh0K/r4mW1wMS/3UtEJu5UoTRUcopHrBcI5AAhAFZh/M4fj4xdI8xQghMXTzr3AGS8AUVJeXa9Osv+JfzID0yO4c0xjFq1PUKJ3hN8KVMkqPbNTK0pVPC1muvW0qmbl2zMBY8yhmPKaupjvzuntY0hSHpmcN7Om2WGN8bb52qPH4+QMD2myO/wFK03fT
|
||||
@@ -30,7 +30,7 @@ At a high-level, the basic ways to generate examples are:
|
||||
- User feedback: users (or labelers) leave feedback on interactions with the application and examples are generated based on that feedback (for example, all interactions with positive feedback could be turned into examples).
|
||||
- LLM feedback: same as user feedback but the process is automated by having models evaluate themselves.
|
||||
|
||||
Which approach is best depends on your task. For tasks where a small number core principles need to be understood really well, it can be valuable hand-craft a few really good examples.
|
||||
Which approach is best depends on your task. For tasks where a small number of core principles need to be understood really well, it can be valuable hand-craft a few really good examples.
|
||||
For tasks where the space of correct behaviors is broader and more nuanced, it can be useful to generate many examples in a more automated fashion so that there's a higher likelihood of there being some highly relevant examples for any runtime input.
|
||||
|
||||
**Single-turn v.s. multi-turn examples**
|
||||
@@ -39,8 +39,8 @@ Another dimension to think about when generating examples is what the example is
|
||||
|
||||
The simplest types of examples just have a user input and an expected model output. These are single-turn examples.
|
||||
|
||||
One more complex type if example is where the example is an entire conversation, usually in which a model initially responds incorrectly and a user then tells the model how to correct its answer.
|
||||
This is called a multi-turn example. Multi-turn examples can be useful for more nuanced tasks where its useful to show common errors and spell out exactly why they're wrong and what should be done instead.
|
||||
One more complex type of example is where the example is an entire conversation, usually in which a model initially responds incorrectly and a user then tells the model how to correct its answer.
|
||||
This is called a multi-turn example. Multi-turn examples can be useful for more nuanced tasks where it's useful to show common errors and spell out exactly why they're wrong and what should be done instead.
|
||||
|
||||
## 2. Number of examples
|
||||
|
||||
@@ -77,7 +77,7 @@ If we insert our examples as messages, where each example is represented as a se
|
||||
One area where formatting examples as messages can be tricky is when our example outputs have tool calls. This is because different models have different constraints on what types of message sequences are allowed when any tool calls are generated.
|
||||
- Some models require that any AIMessage with tool calls be immediately followed by ToolMessages for every tool call,
|
||||
- Some models additionally require that any ToolMessages be immediately followed by an AIMessage before the next HumanMessage,
|
||||
- Some models require that tools are passed in to the model if there are any tool calls / ToolMessages in the chat history.
|
||||
- Some models require that tools are passed into the model if there are any tool calls / ToolMessages in the chat history.
|
||||
|
||||
These requirements are model-specific and should be checked for the model you are using. If your model requires ToolMessages after tool calls and/or AIMessages after ToolMessages and your examples only include expected tool calls and not the actual tool outputs, you can try adding dummy ToolMessages / AIMessages to the end of each example with generic contents to satisfy the API constraints.
|
||||
In these cases it's especially worth experimenting with inserting your examples as strings versus messages, as having dummy messages can adversely affect certain models.
|
||||
|
||||
@@ -91,7 +91,7 @@ For more information, please see:
|
||||
|
||||
#### Usage with LCEL
|
||||
|
||||
If you compose multiple Runnables using [LangChain’s Expression Language (LCEL)](/docs/concepts/lcel), the `stream()` and `astream()` methods will, by convention, stream the output of the last step in the chain. This allows the final processed result to be streamed incrementally. **LCEL** tries to optimize streaming latency in pipelines such that the streaming results from the last step are available as soon as possible.
|
||||
If you compose multiple Runnables using [LangChain’s Expression Language (LCEL)](/docs/concepts/lcel), the `stream()` and `astream()` methods will, by convention, stream the output of the last step in the chain. This allows the final processed result to be streamed incrementally. **LCEL** tries to optimize streaming latency in pipelines so that the streaming results from the last step are available as soon as possible.
|
||||
|
||||
|
||||
|
||||
@@ -104,7 +104,7 @@ Use the `astream_events` API to access custom data and intermediate outputs from
|
||||
While this API is available for use with [LangGraph](/docs/concepts/architecture#langgraph) as well, it is usually not necessary when working with LangGraph, as the `stream` and `astream` methods provide comprehensive streaming capabilities for LangGraph graphs.
|
||||
:::
|
||||
|
||||
For chains constructed using **LCEL**, the `.stream()` method only streams the output of the final step from te chain. This might be sufficient for some applications, but as you build more complex chains of several LLM calls together, you may want to use the intermediate values of the chain alongside the final output. For example, you may want to return sources alongside the final generation when building a chat-over-documents app.
|
||||
For chains constructed using **LCEL**, the `.stream()` method only streams the output of the final step from the chain. This might be sufficient for some applications, but as you build more complex chains of several LLM calls together, you may want to use the intermediate values of the chain alongside the final output. For example, you may want to return sources alongside the final generation when building a chat-over-documents app.
|
||||
|
||||
There are ways to do this [using callbacks](/docs/concepts/callbacks), or by constructing your chain in such a way that it passes intermediate
|
||||
values to the end with something like chained [`.assign()`](/docs/how_to/passthrough/) calls, but LangChain also includes an
|
||||
|
||||
@@ -16,7 +16,7 @@ This need motivates the concept of structured output, where models can be instru
|
||||
|
||||
## Recommended usage
|
||||
|
||||
This pseudo-code illustrates the recommended workflow when using structured output.
|
||||
This pseudocode illustrates the recommended workflow when using structured output.
|
||||
LangChain provides a method, [`with_structured_output()`](/docs/how_to/structured_output/#the-with_structured_output-method), that automates the process of binding the schema to the [model](/docs/concepts/chat_models/) and parsing the output.
|
||||
This helper function is available for all model providers that support structured output.
|
||||
|
||||
@@ -99,20 +99,10 @@ Here is an example of how to use JSON mode with OpenAI:
|
||||
|
||||
```python
|
||||
from langchain_openai import ChatOpenAI
|
||||
model = ChatOpenAI(model="gpt-4o", model_kwargs={ "response_format": { "type": "json_object" } })
|
||||
model = ChatOpenAI(model="gpt-4o").with_structured_output(method="json_mode")
|
||||
ai_msg = model.invoke("Return a JSON object with key 'random_ints' and a value of 10 random ints in [0-99]")
|
||||
ai_msg.content
|
||||
'\n{\n "random_ints": [23, 47, 89, 15, 34, 76, 58, 3, 62, 91]\n}'
|
||||
```
|
||||
|
||||
One important point to flag: the model *still* returns a string, which needs to be parsed into a JSON object.
|
||||
This can, of course, be done simply with the `json` library, or with a JSON output parser if you need more advanced functionality.
|
||||
See this [how-to guide on the JSON output parser](/docs/how_to/output_parser_json) for more details.
|
||||
|
||||
```python
|
||||
import json
|
||||
json_object = json.loads(ai_msg.content)
|
||||
{'random_ints': [23, 47, 89, 15, 34, 76, 58, 3, 62, 91]}
|
||||
ai_msg
|
||||
{'random_ints': [45, 67, 12, 34, 89, 23, 78, 56, 90, 11]}
|
||||
```
|
||||
|
||||
## Structured output method
|
||||
|
||||
@@ -30,7 +30,7 @@ You will sometimes hear the term `function calling`. We use this term interchang
|
||||
|
||||
## Recommended usage
|
||||
|
||||
This pseudo-code illustrates the recommended workflow for using tool calling.
|
||||
This pseudocode illustrates the recommended workflow for using tool calling.
|
||||
Created tools are passed to the `.bind_tools()` method as a list.
|
||||
This model can be called as usual. If a tool call is made, the model's response will contain the tool call arguments.
|
||||
The tool call arguments can be passed directly to the tool.
|
||||
|
||||
@@ -73,17 +73,18 @@ In order to contribute an integration, you should follow these steps:
|
||||
|
||||
## Co-Marketing
|
||||
|
||||
With over 20 million monthly downloads, LangChain has a large audience of developers building LLM applications.
|
||||
Besides just adding integrations, we also like to show them examples of cool tools or APIs they can use.
|
||||
With over 20 million monthly downloads, LangChain has a large audience of developers
|
||||
building LLM applications. Beyond just listing integrations, we aim to highlight
|
||||
high-quality, educational examples that inspire developers and advance the ecosystem.
|
||||
|
||||
While traditionally called "co-marketing", we like to think of this more as "co-education".
|
||||
For that reason, while we are happy to highlight your integration through our social media channels, we prefer to highlight examples that also serve some educational purpose.
|
||||
Our main social media channels are Twitter and LinkedIn.
|
||||
While we occasionally share integrations, we prioritize content that provides
|
||||
meaningful insights and best practices. Our main social channels are Twitter and
|
||||
LinkedIn, where we highlight the best examples.
|
||||
|
||||
Here are some heuristics for types of content we are excited to promote:
|
||||
|
||||
- **Integration announcement:** If you announce the integration with a link to the LangChain documentation page, we are happy to re-tweet/re-share on Twitter/LinkedIn.
|
||||
- **Educational content:** We highlight good educational content on the weekends - if you write a good blog or make a good YouTube video, we are happy to share there! Note that we prefer content that is NOT framed as "here's how to use integration XYZ", but rather "here's how to do ABC", as we find that is more educational and helpful for developers.
|
||||
- **Integration announcement:** Announcements of new integrations with a link to the LangChain documentation page.
|
||||
- **Educational content:** Blogs, YouTube videos and other media showcasing educational content. Note that we prefer content that is NOT framed as "here's how to use integration XYZ", but rather "here's how to do ABC", as we find that is more educational and helpful for developers.
|
||||
- **End-to-end applications:** End-to-end applications are great resources for developers looking to build. We prefer to highlight applications that are more complex/agentic in nature, and that use [LangGraph](https://github.com/langchain-ai/langgraph) as the orchestration framework. We get particularly excited about anything involving long-term memory, human-in-the-loop interaction patterns, or multi-agent architectures.
|
||||
- **Research:** We love highlighting novel research! Whether it is research built on top of LangChain or that integrates with it.
|
||||
|
||||
|
||||
@@ -35,7 +35,7 @@ from a template, which can be edited to implement your LangChain components.
|
||||
- [GitHub](https://github.com) account
|
||||
- [PyPI](https://pypi.org/) account
|
||||
|
||||
### Boostrapping a new Python package with langchain-cli
|
||||
### Bootstrapping a new Python package with langchain-cli
|
||||
|
||||
First, install `langchain-cli` and `poetry`:
|
||||
|
||||
@@ -104,7 +104,7 @@ dependency management and packaging, and you're welcome to use any other tools y
|
||||
- [GitHub](https://github.com) account
|
||||
- [PyPI](https://pypi.org/) account
|
||||
|
||||
### Boostrapping a new Python package with Poetry
|
||||
### Bootstrapping a new Python package with Poetry
|
||||
|
||||
First, install Poetry:
|
||||
|
||||
|
||||
@@ -35,5 +35,5 @@ Please reference our [Review Process](review_process.mdx).
|
||||
|
||||
### I think my PR was closed in a way that didn't follow the review process. What should I do?
|
||||
|
||||
Tag `@efriis` in the PR comments referencing the portion of the review
|
||||
Tag `@ccurme` in the PR comments referencing the portion of the review
|
||||
process that you believe was not followed. We'll take a look!
|
||||
|
||||
@@ -21,7 +21,7 @@
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"- [LangChain Expression Language (LCEL)](/docs/concepts/lcel)\n",
|
||||
"- [The Runnable interface](/docs/concepts/runnables/)\n",
|
||||
"- [Chaining runnables](/docs/how_to/sequence/)\n",
|
||||
"- [Binding runtime arguments](/docs/how_to/binding/)\n",
|
||||
"\n",
|
||||
@@ -62,6 +62,163 @@
|
||||
" os.environ[\"OPENAI_API_KEY\"] = getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9d25f63f-a048-42f3-ac2f-e20ba99cff16",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Configuring fields on a chat model\n",
|
||||
"\n",
|
||||
"If using [init_chat_model](/docs/how_to/chat_models_universal_init/) to create a chat model, you can specify configurable fields in the constructor:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "92ba5e49-b2b4-432b-b8bc-b03de46dc2bb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_models import init_chat_model\n",
|
||||
"\n",
|
||||
"llm = init_chat_model(\n",
|
||||
" \"openai:gpt-4o-mini\",\n",
|
||||
" # highlight-next-line\n",
|
||||
" configurable_fields=(\"temperature\",),\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "61ef4976-9943-492b-9554-0d10e3d3ba76",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can then set the parameter at runtime using `.with_config`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "277e3232-9b77-4828-8082-b62f4d97127f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Hello! How can I assist you today?\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"response = llm.with_config({\"temperature\": 0}).invoke(\"Hello\")\n",
|
||||
"print(response.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "44c5fe89-f0a6-4ff0-b419-b927e51cc9fa",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
":::tip\n",
|
||||
"\n",
|
||||
"In addition to invocation parameters like temperature, configuring fields this way extends to clients and other attributes.\n",
|
||||
"\n",
|
||||
":::"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "fed7e600-4d5e-4875-8d37-082ec926e66f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Use with tools\n",
|
||||
"\n",
|
||||
"This method is applicable when [binding tools](/docs/concepts/tool_calling/) as well:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "61a67769-4a15-49e2-a945-1f4e7ef19d8c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'get_weather',\n",
|
||||
" 'args': {'location': 'San Francisco'},\n",
|
||||
" 'id': 'call_B93EttzlGyYUhzbIIiMcl3bE',\n",
|
||||
" 'type': 'tool_call'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def get_weather(location: str):\n",
|
||||
" \"\"\"Get the weather.\"\"\"\n",
|
||||
" return \"It's sunny.\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"llm_with_tools = llm.bind_tools([get_weather])\n",
|
||||
"response = llm_with_tools.with_config({\"temperature\": 0}).invoke(\n",
|
||||
" \"What's the weather in SF?\"\n",
|
||||
")\n",
|
||||
"response.tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b71c7bf5-f351-4b90-ae86-1100d2dcdfaa",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"In addition to `.with_config`, we can now include the parameter when passing a configuration directly. See example below, where we allow the underlying model temperature to be configurable inside of a [langgraph agent](/docs/tutorials/agents/):"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9bb36a46-7b67-4f11-b043-771f3005f493",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install --upgrade langgraph"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "093d1c7d-1a64-4e6a-849f-075526b9b3ca",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"agent = create_react_agent(llm, [get_weather])\n",
|
||||
"\n",
|
||||
"response = agent.invoke(\n",
|
||||
" {\"messages\": \"What's the weather in Boston?\"},\n",
|
||||
" {\"configurable\": {\"temperature\": 0}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9dc5be03-528f-4532-8cb0-1f149dddedc9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Configuring fields on arbitrary Runnables\n",
|
||||
"\n",
|
||||
"You can also use the `.configurable_fields` method on arbitrary [Runnables](/docs/concepts/runnables/), as shown below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
@@ -604,7 +761,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -492,7 +492,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": null,
|
||||
"id": "1dad8f8e",
|
||||
"metadata": {
|
||||
"execution": {
|
||||
@@ -504,13 +504,14 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import Optional, Type\n",
|
||||
"from typing import Optional\n",
|
||||
"\n",
|
||||
"from langchain_core.callbacks import (\n",
|
||||
" AsyncCallbackManagerForToolRun,\n",
|
||||
" CallbackManagerForToolRun,\n",
|
||||
")\n",
|
||||
"from langchain_core.tools import BaseTool\n",
|
||||
"from langchain_core.tools.base import ArgsSchema\n",
|
||||
"from pydantic import BaseModel, Field\n",
|
||||
"\n",
|
||||
"\n",
|
||||
@@ -524,7 +525,7 @@
|
||||
"class CustomCalculatorTool(BaseTool):\n",
|
||||
" name: str = \"Calculator\"\n",
|
||||
" description: str = \"useful for when you need to answer questions about math\"\n",
|
||||
" args_schema: Type[BaseModel] = CalculatorInput\n",
|
||||
" args_schema: Optional[ArgsSchema] = CalculatorInput\n",
|
||||
" return_direct: bool = True\n",
|
||||
"\n",
|
||||
" def _run(\n",
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
"\n",
|
||||
"This covers how to load `HTML` documents into a LangChain [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) objects that we can use downstream.\n",
|
||||
"\n",
|
||||
"Parsing HTML files often requires specialized tools. Here we demonstrate parsing via [Unstructured](https://unstructured-io.github.io/unstructured/) and [BeautifulSoup4](https://beautiful-soup-4.readthedocs.io/en/latest/), which can be installed via pip. Head over to the integrations page to find integrations with additional services, such as [Azure AI Document Intelligence](/docs/integrations/document_loaders/azure_document_intelligence) or [FireCrawl](/docs/integrations/document_loaders/firecrawl).\n",
|
||||
"Parsing HTML files often requires specialized tools. Here we demonstrate parsing via [Unstructured](https://docs.unstructured.io) and [BeautifulSoup4](https://beautiful-soup-4.readthedocs.io/en/latest/), which can be installed via pip. Head over to the integrations page to find integrations with additional services, such as [Azure AI Document Intelligence](/docs/integrations/document_loaders/azure_document_intelligence) or [FireCrawl](/docs/integrations/document_loaders/firecrawl).\n",
|
||||
"\n",
|
||||
"## Loading HTML with Unstructured"
|
||||
]
|
||||
|
||||
@@ -161,7 +161,7 @@
|
||||
"source": [
|
||||
"## Adding human approval\n",
|
||||
"\n",
|
||||
"Let's add a step in the chain that will ask a person to approve or reject the tall call request.\n",
|
||||
"Let's add a step in the chain that will ask a person to approve or reject the tool call request.\n",
|
||||
"\n",
|
||||
"On rejection, the step will raise an exception which will stop execution of the rest of the chain."
|
||||
]
|
||||
|
||||
@@ -30,7 +30,8 @@
|
||||
"1. The resulting chat history should be **valid**. Usually this means that the following properties should be satisfied:\n",
|
||||
" - The chat history **starts** with either (1) a `HumanMessage` or (2) a [SystemMessage](/docs/concepts/messages/#systemmessage) followed by a `HumanMessage`.\n",
|
||||
" - The chat history **ends** with either a `HumanMessage` or a `ToolMessage`.\n",
|
||||
" - A `ToolMessage` can only appear after an `AIMessage` that involved a tool call. \n",
|
||||
" - A `ToolMessage` can only appear after an `AIMessage` that involved a tool call.\n",
|
||||
"\n",
|
||||
" This can be achieved by setting `start_on=\"human\"` and `ends_on=(\"human\", \"tool\")`.\n",
|
||||
"3. It includes recent messages and drops old messages in the chat history.\n",
|
||||
" This can be achieved by setting `strategy=\"last\"`.\n",
|
||||
|
||||
@@ -102,6 +102,16 @@
|
||||
"%pip install -qU langchain-anthropic"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "fe4993ad-4a9b-4021-8ebd-f0fbbc739f49",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
":::info This guide requires ``langchain-anthropic>=0.3.10``\n",
|
||||
"\n",
|
||||
":::"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
|
||||
@@ -245,7 +255,7 @@
|
||||
"source": [
|
||||
"## Content blocks\n",
|
||||
"\n",
|
||||
"One key difference to note between Anthropic models and most others is that the contents of a single Anthropic AI message can either be a single string or a **list of content blocks**. For example when an Anthropic model invokes a tool, the tool invocation is part of the message content (as well as being exposed in the standardized `AIMessage.tool_calls`):"
|
||||
"Content from a single Anthropic AI message can either be a single string or a **list of content blocks**. For example when an Anthropic model invokes a tool, the tool invocation is part of the message content (as well as being exposed in the standardized `AIMessage.tool_calls`):"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -315,6 +325,430 @@
|
||||
"ai_msg.tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6e36d25c-f358-49e5-aefa-b99fbd3fec6b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Extended thinking\n",
|
||||
"\n",
|
||||
"Claude 3.7 Sonnet supports an [extended thinking](https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking) feature, which will output the step-by-step reasoning process that led to its final answer.\n",
|
||||
"\n",
|
||||
"To use it, specify the `thinking` parameter when initializing `ChatAnthropic`. It can also be passed in as a kwarg during invocation.\n",
|
||||
"\n",
|
||||
"You will need to specify a token budget to use this feature. See usage example below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "a34cf93b-8522-43a6-a3f3-8a189ddf54a7",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[\n",
|
||||
" {\n",
|
||||
" \"signature\": \"ErUBCkYIARgCIkCx7bIPj35jGPHpoVOB2y5hvPF8MN4lVK75CYGftmVNlI4axz2+bBbSexofWsN1O/prwNv8yPXnIXQmwT6zrJsKEgwJzvks0yVRZtaGBScaDOm9xcpOxbuhku1zViIw9WDgil/KZL8DsqWrhVpC6TzM0RQNCcsHcmgmyxbgG9g8PR0eJGLxCcGoEw8zMQu1Kh1hQ1/03hZ2JCOgigpByR9aNPTwwpl64fQUe6WwIw==\",\n",
|
||||
" \"thinking\": \"To find the cube root of 50.653, I need to find the value of $x$ such that $x^3 = 50.653$.\\n\\nI can try to estimate this first. \\n$3^3 = 27$\\n$4^3 = 64$\\n\\nSo the cube root of 50.653 will be somewhere between 3 and 4, but closer to 4.\\n\\nLet me try to compute this more precisely. I can use the cube root function:\\n\\ncube root of 50.653 = 50.653^(1/3)\\n\\nLet me calculate this:\\n50.653^(1/3) \\u2248 3.6998\\n\\nLet me verify:\\n3.6998^3 \\u2248 50.6533\\n\\nThat's very close to 50.653, so I'm confident that the cube root of 50.653 is approximately 3.6998.\\n\\nActually, let me compute this more precisely:\\n50.653^(1/3) \\u2248 3.69981\\n\\nLet me verify once more:\\n3.69981^3 \\u2248 50.652998\\n\\nThat's extremely close to 50.653, so I'll say that the cube root of 50.653 is approximately 3.69981.\",\n",
|
||||
" \"type\": \"thinking\"\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"text\": \"The cube root of 50.653 is approximately 3.6998.\\n\\nTo verify: 3.6998\\u00b3 = 50.6530, which is very close to our original number.\",\n",
|
||||
" \"type\": \"text\"\n",
|
||||
" }\n",
|
||||
"]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"\n",
|
||||
"llm = ChatAnthropic(\n",
|
||||
" model=\"claude-3-7-sonnet-latest\",\n",
|
||||
" max_tokens=5000,\n",
|
||||
" thinking={\"type\": \"enabled\", \"budget_tokens\": 2000},\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"response = llm.invoke(\"What is the cube root of 50.653?\")\n",
|
||||
"print(json.dumps(response.content, indent=2))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "34349dfe-5d81-4887-a4f4-cd01e9587cdc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Prompt caching\n",
|
||||
"\n",
|
||||
"Anthropic supports [caching](https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching) of [elements of your prompts](https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching#what-can-be-cached), including messages, tool definitions, tool results, images and documents. This allows you to re-use large documents, instructions, [few-shot documents](/docs/concepts/few_shot_prompting/), and other data to reduce latency and costs.\n",
|
||||
"\n",
|
||||
"To enable caching on an element of a prompt, mark its associated content block using the `cache_control` key. See examples below:\n",
|
||||
"\n",
|
||||
"### Messages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "babb44a5-33f7-4200-9dfc-be867cf2c217",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"First invocation:\n",
|
||||
"{'cache_read': 0, 'cache_creation': 1458}\n",
|
||||
"\n",
|
||||
"Second:\n",
|
||||
"{'cache_read': 1458, 'cache_creation': 0}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import requests\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"\n",
|
||||
"llm = ChatAnthropic(model=\"claude-3-7-sonnet-20250219\")\n",
|
||||
"\n",
|
||||
"# Pull LangChain readme\n",
|
||||
"get_response = requests.get(\n",
|
||||
" \"https://raw.githubusercontent.com/langchain-ai/langchain/master/README.md\"\n",
|
||||
")\n",
|
||||
"readme = get_response.text\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" {\n",
|
||||
" \"role\": \"system\",\n",
|
||||
" \"content\": [\n",
|
||||
" {\n",
|
||||
" \"type\": \"text\",\n",
|
||||
" \"text\": \"You are a technology expert.\",\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"type\": \"text\",\n",
|
||||
" \"text\": f\"{readme}\",\n",
|
||||
" # highlight-next-line\n",
|
||||
" \"cache_control\": {\"type\": \"ephemeral\"},\n",
|
||||
" },\n",
|
||||
" ],\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"role\": \"user\",\n",
|
||||
" \"content\": \"What's LangChain, according to its README?\",\n",
|
||||
" },\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"response_1 = llm.invoke(messages)\n",
|
||||
"response_2 = llm.invoke(messages)\n",
|
||||
"\n",
|
||||
"usage_1 = response_1.usage_metadata[\"input_token_details\"]\n",
|
||||
"usage_2 = response_2.usage_metadata[\"input_token_details\"]\n",
|
||||
"\n",
|
||||
"print(f\"First invocation:\\n{usage_1}\")\n",
|
||||
"print(f\"\\nSecond:\\n{usage_2}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "141ce9c5-012d-4502-9d61-4a413b5d959a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "1de82015-810f-4ed4-a08b-9866ea8746ce",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"First invocation:\n",
|
||||
"{'cache_read': 0, 'cache_creation': 1809}\n",
|
||||
"\n",
|
||||
"Second:\n",
|
||||
"{'cache_read': 1809, 'cache_creation': 0}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_anthropic import convert_to_anthropic_tool\n",
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"# For demonstration purposes, we artificially expand the\n",
|
||||
"# tool description.\n",
|
||||
"description = (\n",
|
||||
" f\"Get the weather at a location. By the way, check out this readme: {readme}\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool(description=description)\n",
|
||||
"def get_weather(location: str) -> str:\n",
|
||||
" return \"It's sunny.\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Enable caching on the tool\n",
|
||||
"# highlight-start\n",
|
||||
"weather_tool = convert_to_anthropic_tool(get_weather)\n",
|
||||
"weather_tool[\"cache_control\"] = {\"type\": \"ephemeral\"}\n",
|
||||
"# highlight-end\n",
|
||||
"\n",
|
||||
"llm = ChatAnthropic(model=\"claude-3-7-sonnet-20250219\")\n",
|
||||
"llm_with_tools = llm.bind_tools([weather_tool])\n",
|
||||
"query = \"What's the weather in San Francisco?\"\n",
|
||||
"\n",
|
||||
"response_1 = llm_with_tools.invoke(query)\n",
|
||||
"response_2 = llm_with_tools.invoke(query)\n",
|
||||
"\n",
|
||||
"usage_1 = response_1.usage_metadata[\"input_token_details\"]\n",
|
||||
"usage_2 = response_2.usage_metadata[\"input_token_details\"]\n",
|
||||
"\n",
|
||||
"print(f\"First invocation:\\n{usage_1}\")\n",
|
||||
"print(f\"\\nSecond:\\n{usage_2}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a763830d-82cb-448a-ab30-f561522791b9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Incremental caching in conversational applications\n",
|
||||
"\n",
|
||||
"Prompt caching can be used in [multi-turn conversations](https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching#continuing-a-multi-turn-conversation) to maintain context from earlier messages without redundant processing.\n",
|
||||
"\n",
|
||||
"We can enable incremental caching by marking the final message with `cache_control`. Claude will automatically use the longest previously-cached prefix for follow-up messages.\n",
|
||||
"\n",
|
||||
"Below, we implement a simple chatbot that incorporates this feature. We follow the LangChain [chatbot tutorial](/docs/tutorials/chatbot/), but add a custom [reducer](https://langchain-ai.github.io/langgraph/concepts/low_level/#reducers) that automatically marks the last content block in each user message with `cache_control`. See below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "07fde4db-344c-49bc-a5b4-99e2d20fb394",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import requests\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"from langgraph.checkpoint.memory import MemorySaver\n",
|
||||
"from langgraph.graph import START, StateGraph, add_messages\n",
|
||||
"from typing_extensions import Annotated, TypedDict\n",
|
||||
"\n",
|
||||
"llm = ChatAnthropic(model=\"claude-3-7-sonnet-20250219\")\n",
|
||||
"\n",
|
||||
"# Pull LangChain readme\n",
|
||||
"get_response = requests.get(\n",
|
||||
" \"https://raw.githubusercontent.com/langchain-ai/langchain/master/README.md\"\n",
|
||||
")\n",
|
||||
"readme = get_response.text\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def messages_reducer(left: list, right: list) -> list:\n",
|
||||
" # Update last user message\n",
|
||||
" for i in range(len(right) - 1, -1, -1):\n",
|
||||
" if right[i].type == \"human\":\n",
|
||||
" right[i].content[-1][\"cache_control\"] = {\"type\": \"ephemeral\"}\n",
|
||||
" break\n",
|
||||
"\n",
|
||||
" return add_messages(left, right)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class State(TypedDict):\n",
|
||||
" messages: Annotated[list, messages_reducer]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"workflow = StateGraph(state_schema=State)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Define the function that calls the model\n",
|
||||
"def call_model(state: State):\n",
|
||||
" response = llm.invoke(state[\"messages\"])\n",
|
||||
" return {\"messages\": [response]}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Define the (single) node in the graph\n",
|
||||
"workflow.add_edge(START, \"model\")\n",
|
||||
"workflow.add_node(\"model\", call_model)\n",
|
||||
"\n",
|
||||
"# Add memory\n",
|
||||
"memory = MemorySaver()\n",
|
||||
"app = workflow.compile(checkpointer=memory)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "40013035-eb22-4327-8aaf-1ee974d9ff46",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"Hello, Bob! It's nice to meet you. How are you doing today? Is there something I can help you with?\n",
|
||||
"\n",
|
||||
"{'cache_read': 0, 'cache_creation': 0}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"\n",
|
||||
"config = {\"configurable\": {\"thread_id\": \"abc123\"}}\n",
|
||||
"\n",
|
||||
"query = \"Hi! I'm Bob.\"\n",
|
||||
"\n",
|
||||
"input_message = HumanMessage([{\"type\": \"text\", \"text\": query}])\n",
|
||||
"output = app.invoke({\"messages\": [input_message]}, config)\n",
|
||||
"output[\"messages\"][-1].pretty_print()\n",
|
||||
"print(f'\\n{output[\"messages\"][-1].usage_metadata[\"input_token_details\"]}')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "22371f68-7913-4c4f-ab4a-2b4265095469",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"I can see you've shared the README from the LangChain GitHub repository. This is the documentation for LangChain, which is a popular framework for building applications powered by Large Language Models (LLMs). Here's a summary of what the README contains:\n",
|
||||
"\n",
|
||||
"LangChain is:\n",
|
||||
"- A framework for developing LLM-powered applications\n",
|
||||
"- Helps chain together components and integrations to simplify AI application development\n",
|
||||
"- Provides a standard interface for models, embeddings, vector stores, etc.\n",
|
||||
"\n",
|
||||
"Key features/benefits:\n",
|
||||
"- Real-time data augmentation (connect LLMs to diverse data sources)\n",
|
||||
"- Model interoperability (swap models easily as needed)\n",
|
||||
"- Large ecosystem of integrations\n",
|
||||
"\n",
|
||||
"The LangChain ecosystem includes:\n",
|
||||
"- LangSmith - For evaluations and observability\n",
|
||||
"- LangGraph - For building complex agents with customizable architecture\n",
|
||||
"- LangGraph Platform - For deployment and scaling of agents\n",
|
||||
"\n",
|
||||
"The README also mentions installation instructions (`pip install -U langchain`) and links to various resources including tutorials, how-to guides, conceptual guides, and API references.\n",
|
||||
"\n",
|
||||
"Is there anything specific about LangChain you'd like to know more about, Bob?\n",
|
||||
"\n",
|
||||
"{'cache_read': 0, 'cache_creation': 1498}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = f\"Check out this readme: {readme}\"\n",
|
||||
"\n",
|
||||
"input_message = HumanMessage([{\"type\": \"text\", \"text\": query}])\n",
|
||||
"output = app.invoke({\"messages\": [input_message]}, config)\n",
|
||||
"output[\"messages\"][-1].pretty_print()\n",
|
||||
"print(f'\\n{output[\"messages\"][-1].usage_metadata[\"input_token_details\"]}')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "0e6798fc-8a80-4324-b4e3-f18706256c61",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"Your name is Bob. You introduced yourself at the beginning of our conversation.\n",
|
||||
"\n",
|
||||
"{'cache_read': 1498, 'cache_creation': 269}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = \"What was my name again?\"\n",
|
||||
"\n",
|
||||
"input_message = HumanMessage([{\"type\": \"text\", \"text\": query}])\n",
|
||||
"output = app.invoke({\"messages\": [input_message]}, config)\n",
|
||||
"output[\"messages\"][-1].pretty_print()\n",
|
||||
"print(f'\\n{output[\"messages\"][-1].usage_metadata[\"input_token_details\"]}')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "aa4b3647-c672-4782-a88c-a55fd3bf969f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"In the [LangSmith trace](https://smith.langchain.com/public/4d0584d8-5f9e-4b91-8704-93ba2ccf416a/r), toggling \"raw output\" will show exactly what messages are sent to the chat model, including `cache_control` keys."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "029009f2-2795-418b-b5fc-fb996c6fe99e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Token-efficient tool use\n",
|
||||
"\n",
|
||||
"Anthropic supports a (beta) [token-efficient tool use](https://docs.anthropic.com/en/docs/build-with-claude/tool-use/token-efficient-tool-use) feature. To use it, specify the relevant beta-headers when instantiating the model."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "206cff65-33b8-4a88-9b1a-050b4d57772a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[{'name': 'get_weather', 'args': {'location': 'San Francisco'}, 'id': 'toolu_01EoeE1qYaePcmNbUvMsWtmA', 'type': 'tool_call'}]\n",
|
||||
"\n",
|
||||
"Total tokens: 408\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"llm = ChatAnthropic(\n",
|
||||
" model=\"claude-3-7-sonnet-20250219\",\n",
|
||||
" temperature=0,\n",
|
||||
" # highlight-start\n",
|
||||
" model_kwargs={\n",
|
||||
" \"extra_headers\": {\"anthropic-beta\": \"token-efficient-tools-2025-02-19\"}\n",
|
||||
" },\n",
|
||||
" # highlight-end\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def get_weather(location: str) -> str:\n",
|
||||
" \"\"\"Get the weather at a location.\"\"\"\n",
|
||||
" return \"It's sunny.\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"llm_with_tools = llm.bind_tools([get_weather])\n",
|
||||
"response = llm_with_tools.invoke(\"What's the weather in San Francisco?\")\n",
|
||||
"print(response.tool_calls)\n",
|
||||
"print(f'\\nTotal tokens: {response.usage_metadata[\"total_tokens\"]}')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "301d372f-4dec-43e6-b58c-eee25633e1a6",
|
||||
@@ -472,6 +906,58 @@
|
||||
"response.content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "cbfec7a9-d9df-4d12-844e-d922456dd9bf",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Built-in tools\n",
|
||||
"\n",
|
||||
"Anthropic supports a variety of [built-in tools](https://docs.anthropic.com/en/docs/build-with-claude/tool-use/text-editor-tool), which can be bound to the model in the [usual way](/docs/how_to/tool_calling/). Claude will generate tool calls adhering to its internal schema for the tool:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "30a0af36-2327-4b1d-9ba5-e47cb72db0be",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"I'd be happy to help you fix the syntax error in your primes.py file. First, let's look at the current content of the file to identify the error.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'str_replace_editor',\n",
|
||||
" 'args': {'command': 'view', 'path': '/repo/primes.py'},\n",
|
||||
" 'id': 'toolu_01VdNgt1YV7kGfj9LFLm6HyQ',\n",
|
||||
" 'type': 'tool_call'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"\n",
|
||||
"llm = ChatAnthropic(model=\"claude-3-7-sonnet-20250219\")\n",
|
||||
"\n",
|
||||
"tool = {\"type\": \"text_editor_20250124\", \"name\": \"str_replace_editor\"}\n",
|
||||
"llm_with_tools = llm.bind_tools([tool])\n",
|
||||
"\n",
|
||||
"response = llm_with_tools.invoke(\n",
|
||||
" \"There's a syntax error in my primes.py file. Can you help me fix it?\"\n",
|
||||
")\n",
|
||||
"print(response.text())\n",
|
||||
"response.tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
|
||||
|
||||
@@ -81,8 +81,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
253
docs/docs/integrations/chat/contextual.ipynb
Normal file
253
docs/docs/integrations/chat/contextual.ipynb
Normal file
@@ -0,0 +1,253 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "raw"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: ContextualAI\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e49f1e0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ChatContextual\n",
|
||||
"\n",
|
||||
"This will help you get started with Contextual AI's Grounded Language Model [chat models](/docs/concepts/chat_models/).\n",
|
||||
"\n",
|
||||
"To learn more about Contextual AI, please visit our [documentation](https://docs.contextual.ai/).\n",
|
||||
"\n",
|
||||
"This integration requires the `contextual-client` Python SDK. Learn more about it [here](https://github.com/ContextualAI/contextual-client-python).\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"This integration invokes Contextual AI's Grounded Language Model.\n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | JS support | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ChatContextual](https://github.com/ContextualAI/langchain-contextual) | [langchain-contextual](https://pypi.org/project/langchain-contextual/) | ❌ | beta | ❌ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | \n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access Contextual models you'll need to create a Contextual AI account, get an API key, and install the `langchain-contextual` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"Head to [app.contextual.ai](https://app.contextual.ai) to sign up to Contextual and generate an API key. Once you've done this set the CONTEXTUAL_AI_API_KEY environment variable:\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if not os.getenv(\"CONTEXTUAL_AI_API_KEY\"):\n",
|
||||
" os.environ[\"CONTEXTUAL_AI_API_KEY\"] = getpass.getpass(\n",
|
||||
" \"Enter your Contextual API key: \"\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain Contextual integration lives in the `langchain-contextual` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-contextual"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions.\n",
|
||||
"\n",
|
||||
"The chat client can be instantiated with these following additional settings:\n",
|
||||
"\n",
|
||||
"| Parameter | Type | Description | Default |\n",
|
||||
"|-----------|------|-------------|---------|\n",
|
||||
"| temperature | Optional[float] | The sampling temperature, which affects the randomness in the response. Note that higher temperature values can reduce groundedness. | 0 |\n",
|
||||
"| top_p | Optional[float] | A parameter for nucleus sampling, an alternative to temperature which also affects the randomness of the response. Note that higher top_p values can reduce groundedness. | 0.9 |\n",
|
||||
"| max_new_tokens | Optional[int] | The maximum number of tokens that the model can generate in the response. Minimum is 1 and maximum is 2048. | 1024 |"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_contextual import ChatContextual\n",
|
||||
"\n",
|
||||
"llm = ChatContextual(\n",
|
||||
" model=\"v1\", # defaults to `v1`\n",
|
||||
" api_key=\"\",\n",
|
||||
" temperature=0, # defaults to 0\n",
|
||||
" top_p=0.9, # defaults to 0.9\n",
|
||||
" max_new_tokens=1024, # defaults to 1024\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b4f3e15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation\n",
|
||||
"\n",
|
||||
"The Contextual Grounded Language Model accepts additional `kwargs` when calling the `ChatContextual.invoke` method.\n",
|
||||
"\n",
|
||||
"These additional inputs are:\n",
|
||||
"\n",
|
||||
"| Parameter | Type | Description |\n",
|
||||
"|-----------|------|-------------|\n",
|
||||
"| knowledge | list[str] | Required: A list of strings of knowledge sources the grounded language model can use when generating a response. |\n",
|
||||
"| system_prompt | Optional[str] | Optional: Instructions the model should follow when generating responses. Note that we do not guarantee that the model follows these instructions exactly. |\n",
|
||||
"| avoid_commentary | Optional[bool] | Optional (Defaults to `False`): Flag to indicate whether the model should avoid providing additional commentary in responses. Commentary is conversational in nature and does not contain verifiable claims; therefore, commentary is not strictly grounded in available context. However, commentary may provide useful context which improves the helpfulness of responses. |"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# include a system prompt (optional)\n",
|
||||
"system_prompt = \"You are a helpful assistant that uses all of the provided knowledge to answer the user's query to the best of your ability.\"\n",
|
||||
"\n",
|
||||
"# provide your own knowledge from your knowledge-base here in an array of strings\n",
|
||||
"knowledge = [\n",
|
||||
" \"There are 2 types of dogs in the world: good dogs and best dogs.\",\n",
|
||||
" \"There are 2 types of cats in the world: good cats and best cats.\",\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"# create your message\n",
|
||||
"messages = [\n",
|
||||
" (\"human\", \"What type of cats are there in the world and what are the types?\"),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"# invoke the GLM by providing the knowledge strings, optional system prompt\n",
|
||||
"# if you want to turn off the GLM's commentary, pass True to the `avoid_commentary` argument\n",
|
||||
"ai_msg = llm.invoke(\n",
|
||||
" messages, knowledge=knowledge, system_prompt=system_prompt, avoid_commentary=True\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(ai_msg.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2c35a9e0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can chain the Contextual Model with output parsers."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "545e1e16",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"\n",
|
||||
"chain = llm | StrOutputParser()\n",
|
||||
"\n",
|
||||
"chain.invoke(\n",
|
||||
"    messages, knowledge=knowledge, system_prompt=system_prompt, avoid_commentary=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all ChatContextual features and configurations head to the GitHub page: https://github.com/ContextualAI/langchain-contextual"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.7"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -38,6 +38,12 @@
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ✅ | ✅ | ✅ | ❌ | \n",
|
||||
"\n",
|
||||
":::note\n",
|
||||
"\n",
|
||||
"DeepSeek-R1, specified via `model=\"deepseek-reasoner\"`, does not support tool calling or structured output. Those features [are supported](https://api-docs.deepseek.com/guides/function_calling) by DeepSeek-V3 (specified via `model=\"deepseek-chat\"`).\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access DeepSeek models you'll need to create a DeepSeek account, get an API key, and install the `langchain-deepseek` integration package.\n",
|
||||
|
||||
@@ -85,21 +85,10 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
|
||||
"execution_count": null,
|
||||
"id": "3f3f510e-2afe-4e76-be41-c5a9665aea63",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m24.0\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.1.2\u001b[0m\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n",
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-groq"
|
||||
]
|
||||
@@ -116,7 +105,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 1,
|
||||
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -124,7 +113,7 @@
|
||||
"from langchain_groq import ChatGroq\n",
|
||||
"\n",
|
||||
"llm = ChatGroq(\n",
|
||||
" model=\"mixtral-8x7b-32768\",\n",
|
||||
" model=\"llama-3.1-8b-instant\",\n",
|
||||
" temperature=0,\n",
|
||||
" max_tokens=None,\n",
|
||||
" timeout=None,\n",
|
||||
@@ -143,7 +132,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 2,
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -152,10 +141,10 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='I enjoy programming. (The French translation is: \"J\\'aime programmer.\")\\n\\nNote: I chose to translate \"I love programming\" as \"J\\'aime programmer\" instead of \"Je suis amoureux de programmer\" because the latter has a romantic connotation that is not present in the original English sentence.', response_metadata={'token_usage': {'completion_tokens': 73, 'prompt_tokens': 31, 'total_tokens': 104, 'completion_time': 0.1140625, 'prompt_time': 0.003352463, 'queue_time': None, 'total_time': 0.117414963}, 'model_name': 'mixtral-8x7b-32768', 'system_fingerprint': 'fp_c5f20b5bb1', 'finish_reason': 'stop', 'logprobs': None}, id='run-64433c19-eadf-42fc-801e-3071e3c40160-0', usage_metadata={'input_tokens': 31, 'output_tokens': 73, 'total_tokens': 104})"
|
||||
"AIMessage(content='The translation of \"I love programming\" to French is:\\n\\n\"J\\'adore le programmation.\"', additional_kwargs={}, response_metadata={'token_usage': {'completion_tokens': 22, 'prompt_tokens': 55, 'total_tokens': 77, 'completion_time': 0.029333333, 'prompt_time': 0.003502892, 'queue_time': 0.553054073, 'total_time': 0.032836225}, 'model_name': 'llama-3.1-8b-instant', 'system_fingerprint': 'fp_a491995411', 'finish_reason': 'stop', 'logprobs': None}, id='run-2b2da04a-993c-40ab-becc-201eab8b1a1b-0', usage_metadata={'input_tokens': 55, 'output_tokens': 22, 'total_tokens': 77})"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -174,7 +163,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 3,
|
||||
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -182,9 +171,9 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"I enjoy programming. (The French translation is: \"J'aime programmer.\")\n",
|
||||
"The translation of \"I love programming\" to French is:\n",
|
||||
"\n",
|
||||
"Note: I chose to translate \"I love programming\" as \"J'aime programmer\" instead of \"Je suis amoureux de programmer\" because the latter has a romantic connotation that is not present in the original English sentence.\n"
|
||||
"\"J'adore le programmation.\"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -204,17 +193,17 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 4,
|
||||
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='That\\'s great! I can help you translate English phrases related to programming into German.\\n\\n\"I love programming\" can be translated as \"Ich liebe Programmieren\" in German.\\n\\nHere are some more programming-related phrases translated into German:\\n\\n* \"Programming language\" = \"Programmiersprache\"\\n* \"Code\" = \"Code\"\\n* \"Variable\" = \"Variable\"\\n* \"Function\" = \"Funktion\"\\n* \"Array\" = \"Array\"\\n* \"Object-oriented programming\" = \"Objektorientierte Programmierung\"\\n* \"Algorithm\" = \"Algorithmus\"\\n* \"Data structure\" = \"Datenstruktur\"\\n* \"Debugging\" = \"Fehlersuche\"\\n* \"Compile\" = \"Kompilieren\"\\n* \"Link\" = \"Verknüpfen\"\\n* \"Run\" = \"Ausführen\"\\n* \"Test\" = \"Testen\"\\n* \"Deploy\" = \"Bereitstellen\"\\n* \"Version control\" = \"Versionskontrolle\"\\n* \"Open source\" = \"Open Source\"\\n* \"Software development\" = \"Softwareentwicklung\"\\n* \"Agile methodology\" = \"Agile Methodik\"\\n* \"DevOps\" = \"DevOps\"\\n* \"Cloud computing\" = \"Cloud Computing\"\\n\\nI hope this helps! Let me know if you have any other questions or if you need further translations.', response_metadata={'token_usage': {'completion_tokens': 331, 'prompt_tokens': 25, 'total_tokens': 356, 'completion_time': 0.520006542, 'prompt_time': 0.00250165, 'queue_time': None, 'total_time': 0.522508192}, 'model_name': 'mixtral-8x7b-32768', 'system_fingerprint': 'fp_c5f20b5bb1', 'finish_reason': 'stop', 'logprobs': None}, id='run-74207fb7-85d3-417d-b2b9-621116b75d41-0', usage_metadata={'input_tokens': 25, 'output_tokens': 331, 'total_tokens': 356})"
|
||||
"AIMessage(content='Ich liebe Programmieren.', additional_kwargs={}, response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 50, 'total_tokens': 56, 'completion_time': 0.008, 'prompt_time': 0.003337935, 'queue_time': 0.20949214500000002, 'total_time': 0.011337935}, 'model_name': 'llama-3.1-8b-instant', 'system_fingerprint': 'fp_a491995411', 'finish_reason': 'stop', 'logprobs': None}, id='run-e33b48dc-5e55-466e-9ebd-7b48c81c3cbd-0', usage_metadata={'input_tokens': 50, 'output_tokens': 6, 'total_tokens': 56})"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -269,7 +258,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -322,7 +322,7 @@
|
||||
"source": [
|
||||
"### ``strict=True``\n",
|
||||
"\n",
|
||||
":::info Requires ``langchain-openai>=0.1.21rc1``\n",
|
||||
":::info Requires ``langchain-openai>=0.1.21``\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
@@ -397,6 +397,405 @@
|
||||
"For more on binding tools and tool call outputs, head to the [tool calling](/docs/how_to/function_calling) docs."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "84833dd0-17e9-4269-82ed-550639d65751",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Responses API\n",
|
||||
"\n",
|
||||
":::info Requires ``langchain-openai>=0.3.9``\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"OpenAI supports a [Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions) API that is oriented toward building [agentic](/docs/concepts/agents/) applications. It includes a suite of [built-in tools](https://platform.openai.com/docs/guides/tools?api-mode=responses), including web and file search. It also supports management of [conversation state](https://platform.openai.com/docs/guides/conversation-state?api-mode=responses), allowing you to continue a conversational thread without explicitly passing in previous messages.\n",
|
||||
"\n",
|
||||
"`ChatOpenAI` will route to the Responses API if one of these features is used. You can also specify `use_responses_api=True` when instantiating `ChatOpenAI`.\n",
|
||||
"\n",
|
||||
"### Built-in tools\n",
|
||||
"\n",
|
||||
"Equipping `ChatOpenAI` with built-in tools will ground its responses with outside information, such as via context in files or the web. The [AIMessage](/docs/concepts/messages/#aimessage) generated from the model will include information about the built-in tool invocation.\n",
|
||||
"\n",
|
||||
"#### Web search\n",
|
||||
"\n",
|
||||
"To trigger a web search, pass `{\"type\": \"web_search_preview\"}` to the model as you would another tool.\n",
|
||||
"\n",
|
||||
":::tip\n",
|
||||
"\n",
|
||||
"You can also pass built-in tools as invocation params:\n",
|
||||
"```python\n",
|
||||
"llm.invoke(\"...\", tools=[{\"type\": \"web_search_preview\"}])\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
":::"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "0d8bfe89-948b-42d4-beac-85ef2a72491d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
|
||||
"\n",
|
||||
"tool = {\"type\": \"web_search_preview\"}\n",
|
||||
"llm_with_tools = llm.bind_tools([tool])\n",
|
||||
"\n",
|
||||
"response = llm_with_tools.invoke(\"What was a positive news story from today?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c9fe67c6-38ff-40a5-93b3-a4b7fca76372",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that the response includes structured [content blocks](/docs/concepts/messages/#content-1) that include both the text of the response and OpenAI [annotations](https://platform.openai.com/docs/guides/tools-web-search?api-mode=responses#output-and-citations) citing its sources:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "3ea5a4b1-f57a-4c8a-97f4-60ab8330a804",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'type': 'text',\n",
|
||||
" 'text': 'Today, a heartwarming story emerged from Minnesota, where a group of high school robotics students built a custom motorized wheelchair for a 2-year-old boy named Cillian Jackson. Born with a genetic condition that limited his mobility, Cillian\\'s family couldn\\'t afford the $20,000 wheelchair he needed. The students at Farmington High School\\'s Rogue Robotics team took it upon themselves to modify a Power Wheels toy car into a functional motorized wheelchair for Cillian, complete with a joystick, safety bumpers, and a harness. One team member remarked, \"I think we won here more than we do in our competitions. Instead of completing a task, we\\'re helping change someone\\'s life.\" ([boredpanda.com](https://www.boredpanda.com/wholesome-global-positive-news/?utm_source=openai))\\n\\nThis act of kindness highlights the profound impact that community support and innovation can have on individuals facing challenges. ',\n",
|
||||
" 'annotations': [{'end_index': 778,\n",
|
||||
" 'start_index': 682,\n",
|
||||
" 'title': '“Global Positive News”: 40 Posts To Remind Us There’s Good In The World',\n",
|
||||
" 'type': 'url_citation',\n",
|
||||
" 'url': 'https://www.boredpanda.com/wholesome-global-positive-news/?utm_source=openai'}]}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"response.content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "95fbc34c-2f12-4d51-92c5-bf62a2f8900c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
":::tip\n",
|
||||
"\n",
|
||||
"You can recover just the text content of the response as a string by using `response.text()`. For example, to stream response text:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"for token in llm_with_tools.stream(\"...\"):\n",
|
||||
" print(token.text(), end=\"|\")\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"See the [streaming guide](/docs/how_to/chat_streaming/) for more detail.\n",
|
||||
"\n",
|
||||
":::"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2a332940-d409-41ee-ac36-2e9bee900e83",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The output message will also contain information from any tool invocations:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "a8011049-6c90-4fcb-82d4-850c72b46941",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'tool_outputs': [{'id': 'ws_67d192aeb6cc81918e736ad4a57937570d6f8507990d9d71',\n",
|
||||
" 'status': 'completed',\n",
|
||||
" 'type': 'web_search_call'}]}"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"response.additional_kwargs"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "288d47bb-3ccb-412f-a3d3-9f6cee0e6214",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### File search\n",
|
||||
"\n",
|
||||
"To trigger a file search, pass a [file search tool](https://platform.openai.com/docs/guides/tools-file-search) to the model as you would another tool. You will need to populate an OpenAI-managed vector store and include the vector store ID in the tool definition. See [OpenAI documentation](https://platform.openai.com/docs/guides/tools-file-search) for more detail."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"id": "1f758726-33ef-4c04-8a54-49adb783bbb3",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Deep Research by OpenAI is a new capability integrated into ChatGPT that allows for the execution of multi-step research tasks independently. It can synthesize extensive amounts of online information and produce comprehensive reports similar to what a research analyst would do, significantly speeding up processes that would typically take hours for a human.\n",
|
||||
"\n",
|
||||
"### Key Features:\n",
|
||||
"- **Independent Research**: Users simply provide a prompt, and the model can find, analyze, and synthesize information from hundreds of online sources.\n",
|
||||
"- **Multi-Modal Capabilities**: The model is also able to browse user-uploaded files, plot graphs using Python, and embed visualizations in its outputs.\n",
|
||||
"- **Training**: Deep Research has been trained using reinforcement learning on real-world tasks that require extensive browsing and reasoning.\n",
|
||||
"\n",
|
||||
"### Applications:\n",
|
||||
"- Useful for professionals in sectors like finance, science, policy, and engineering, enabling them to obtain accurate and thorough research quickly.\n",
|
||||
"- It can also be beneficial for consumers seeking personalized recommendations on complex purchases.\n",
|
||||
"\n",
|
||||
"### Limitations:\n",
|
||||
"Although Deep Research presents significant advancements, it has some limitations, such as the potential to hallucinate facts or struggle with authoritative information. \n",
|
||||
"\n",
|
||||
"Deep Research aims to facilitate access to thorough and documented information, marking a significant step toward the broader goal of developing artificial general intelligence (AGI).\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
|
||||
"\n",
|
||||
"openai_vector_store_ids = [\n",
|
||||
" \"vs_...\", # your IDs here\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"tool = {\n",
|
||||
" \"type\": \"file_search\",\n",
|
||||
" \"vector_store_ids\": openai_vector_store_ids,\n",
|
||||
"}\n",
|
||||
"llm_with_tools = llm.bind_tools([tool])\n",
|
||||
"\n",
|
||||
"response = llm_with_tools.invoke(\"What is deep research by OpenAI?\")\n",
|
||||
"print(response.text())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f88bbd71-83b0-45a6-9141-46ec9da93df6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"As with [web search](#web-search), the response will include content blocks with citations:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"id": "865bc14e-1599-438e-be44-857891004979",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'file_id': 'file-3UzgX7jcC8Dt9ZAFzywg5k',\n",
|
||||
" 'index': 346,\n",
|
||||
" 'type': 'file_citation',\n",
|
||||
" 'filename': 'deep_research_blog.pdf'},\n",
|
||||
" {'file_id': 'file-3UzgX7jcC8Dt9ZAFzywg5k',\n",
|
||||
" 'index': 575,\n",
|
||||
" 'type': 'file_citation',\n",
|
||||
" 'filename': 'deep_research_blog.pdf'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 22,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"response.content[0][\"annotations\"][:2]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "dd00f6be-2862-4634-a0c3-14ee39915c90",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"It will also include information from the built-in tool invocations:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"id": "e16a7110-d2d8-45fa-b372-5109f330540b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'tool_outputs': [{'id': 'fs_67d196fbb83c8191ba20586175331687089228ce932eceb1',\n",
|
||||
" 'queries': ['What is deep research by OpenAI?'],\n",
|
||||
" 'status': 'completed',\n",
|
||||
" 'type': 'file_search_call'}]}"
|
||||
]
|
||||
},
|
||||
"execution_count": 20,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"response.additional_kwargs"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6fda05f0-4b81-4709-9407-f316d760ad50",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Managing conversation state\n",
|
||||
"\n",
|
||||
"The Responses API supports management of [conversation state](https://platform.openai.com/docs/guides/conversation-state?api-mode=responses).\n",
|
||||
"\n",
|
||||
"#### Manually manage state\n",
|
||||
"\n",
|
||||
"You can manage the state manually or using [LangGraph](/docs/tutorials/chatbot/), as with other chat models:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "51d3e4d3-ea78-426c-9205-aecb0937fca7",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"As of March 12, 2025, here are some positive news stories that highlight recent uplifting events:\n",
|
||||
"\n",
|
||||
"*... exemplify positive developments in health, environmental sustainability, and community well-being. \n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
|
||||
"\n",
|
||||
"tool = {\"type\": \"web_search_preview\"}\n",
|
||||
"llm_with_tools = llm.bind_tools([tool])\n",
|
||||
"\n",
|
||||
"first_query = \"What was a positive news story from today?\"\n",
|
||||
"messages = [{\"role\": \"user\", \"content\": first_query}]\n",
|
||||
"\n",
|
||||
"response = llm_with_tools.invoke(messages)\n",
|
||||
"response_text = response.text()\n",
|
||||
"print(f\"{response_text[:100]}... {response_text[-100:]}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "5da9d20f-9712-46f4-a395-5be5a7c1bc62",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Your question was: \"What was a positive news story from today?\"\n",
|
||||
"\n",
|
||||
"The last sentence of my answer was: \"These stories exemplify positive developments in health, environmental sustainability, and community well-being.\"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"second_query = (\n",
|
||||
" \"Repeat my question back to me, as well as the last sentence of your answer.\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"messages.extend(\n",
|
||||
" [\n",
|
||||
" response,\n",
|
||||
" {\"role\": \"user\", \"content\": second_query},\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"second_response = llm_with_tools.invoke(messages)\n",
|
||||
"print(second_response.text())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5fd8ca21-8a5e-4294-af32-11f26a040171",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
":::tip\n",
|
||||
"\n",
|
||||
"You can use [LangGraph](https://langchain-ai.github.io/langgraph/) to manage conversational threads for you in a variety of backends, including in-memory and Postgres. See [this tutorial](/docs/tutorials/chatbot/) to get started.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"#### Passing `previous_response_id`\n",
|
||||
"\n",
|
||||
"When using the Responses API, LangChain messages will include an `\"id\"` field in its metadata. Passing this ID to subsequent invocations will continue the conversation. Note that this is [equivalent](https://platform.openai.com/docs/guides/conversation-state?api-mode=responses#openai-apis-for-conversation-state) to manually passing in messages from a billing perspective."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "009e541a-b372-410e-b9dd-608a8052ce09",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Hi Bob! How can I assist you today?\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(\n",
|
||||
" model=\"gpt-4o-mini\",\n",
|
||||
" use_responses_api=True,\n",
|
||||
")\n",
|
||||
"response = llm.invoke(\"Hi, I'm Bob.\")\n",
|
||||
"print(response.text())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "393a443a-4c5f-4a07-bc0e-c76e529b35e3",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Your name is Bob. How can I help you today, Bob?\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"second_response = llm.invoke(\n",
|
||||
" \"What is my name?\",\n",
|
||||
" previous_response_id=response.response_metadata[\"id\"],\n",
|
||||
")\n",
|
||||
"print(second_response.text())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "57e27714",
|
||||
|
||||
@@ -69,8 +69,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -81,8 +81,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -84,8 +84,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -26,22 +26,9 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Install the package\n",
|
||||
"%pip install --upgrade --quiet dashscope"
|
||||
@@ -49,8 +36,12 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2025-03-05T01:11:20.457141Z",
|
||||
"start_time": "2025-03-05T01:11:18.810160Z"
|
||||
},
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
@@ -66,8 +57,12 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2025-03-05T01:11:24.270318Z",
|
||||
"start_time": "2025-03-05T01:11:24.268064Z"
|
||||
},
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
@@ -266,6 +261,52 @@
|
||||
"ai_message"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Partial Mode\n",
|
||||
"Enable the large model to continue generating content from the initial text you provide."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2025-03-05T01:31:29.155824Z",
|
||||
"start_time": "2025-03-05T01:31:27.239667Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=' has cast off its heavy cloak of snow, donning instead a vibrant garment of fresh greens and floral hues; it is as if the world has woken from a long slumber, stretching and reveling in the warm caress of the sun. Everywhere I look, there is a symphony of life: birdsong fills the air, bees dance from flower to flower, and a gentle breeze carries the sweet fragrance of blossoms. It is in this season that my heart finds particular joy, for it whispers promises of renewal and growth, reminding me that even after the coldest winters, there will always be a spring to follow.', additional_kwargs={}, response_metadata={'model_name': 'qwen-turbo', 'finish_reason': 'stop', 'request_id': '447283e9-ee31-9d82-8734-af572921cb05', 'token_usage': {'input_tokens': 40, 'output_tokens': 127, 'prompt_tokens_details': {'cached_tokens': 0}, 'total_tokens': 167}}, id='run-6a35a91c-cc12-4afe-b56f-fd26d9035357-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.chat_models.tongyi import ChatTongyi\n",
|
||||
"from langchain_core.messages import AIMessage, HumanMessage\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" HumanMessage(\n",
|
||||
" content=\"\"\"Please continue the sentence \"Spring has arrived, and the earth\" to express the beauty of spring and the author's joy.\"\"\"\n",
|
||||
" ),\n",
|
||||
" AIMessage(\n",
|
||||
" content=\"Spring has arrived, and the earth\", additional_kwargs={\"partial\": True}\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"chatLLM = ChatTongyi()\n",
|
||||
"ai_message = chatLLM.invoke(messages)\n",
|
||||
"ai_message"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
|
||||
@@ -65,8 +65,8 @@
|
||||
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
],
|
||||
"outputs": [],
|
||||
"execution_count": null
|
||||
|
||||
245
docs/docs/integrations/chat/xinference.ipynb
Normal file
245
docs/docs/integrations/chat/xinference.ipynb
Normal file
@@ -0,0 +1,245 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Xinference\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "01ca90da",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ChatXinference\n",
|
||||
"\n",
|
||||
"[Xinference](https://github.com/xorbitsai/inference) is a powerful and versatile library designed to serve LLMs, \n",
|
||||
"speech recognition models, and multimodal models, even on your laptop. It supports a variety of models compatible with GGML, such as chatglm, baichuan, whisper, vicuna, orca, and many others.\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support] | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ChatXinference| langchain-xinference | ✅ | ❌ | ✅ | ✅ | ✅ |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling/) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: |:----------------------------------------------------:| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ✅ | ❌ | ❌ |\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"Install `Xinference` through PyPI:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet \"xinference[all]\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7fb27b941602401d91542211134fc71a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Deploy Xinference Locally or in a Distributed Cluster.\n",
|
||||
"\n",
|
||||
"For local deployment, run `xinference`. \n",
|
||||
"\n",
|
||||
"To deploy Xinference in a cluster, first start an Xinference supervisor using the `xinference-supervisor`. You can also use the option -p to specify the port and -H to specify the host. The default port is 8080 and the default host is 0.0.0.0.\n",
|
||||
"\n",
|
||||
"Then, start the Xinference workers using `xinference-worker` on each server you want to run them on. \n",
|
||||
"\n",
|
||||
"You can consult the README file from [Xinference](https://github.com/xorbitsai/inference) for more information.\n",
|
||||
"### Wrapper\n",
|
||||
"\n",
|
||||
"To use Xinference with LangChain, you need to first launch a model. You can use command line interface (CLI) to do so:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "acae54e37e7d407bbb7b55eff062a284",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Model uid: 7167b2b0-2a04-11ee-83f0-d29396a3f064\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%xinference launch -n vicuna-v1.3 -f ggmlv3 -q q4_0"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9a63283cbaf04dbcab1f6479b197f3a8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"A model UID is returned for you to use. Now you can use Xinference with LangChain:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8dd0d8092fe74a7c96281538738b07e2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Installation\n",
|
||||
"\n",
|
||||
"The LangChain Xinference integration lives in the `langchain-xinference` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "72eea5119410473aa328ad9291626812",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-xinference"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8edb47106e1a46a883d545849b8ab81b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Make sure you're using the latest Xinference version for structured outputs."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "55935996",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "10185d26023b46108eb7d9f57d49d2b3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_xinference.chat_models import ChatXinference\n",
|
||||
"\n",
|
||||
"llm = ChatXinference(\n",
|
||||
" server_url=\"your_server_url\", model_uid=\"7167b2b0-2a04-11ee-83f0-d29396a3f064\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"llm.invoke(\n",
|
||||
" \"Q: where can we visit in the capital of France?\",\n",
|
||||
" config={\"max_tokens\": 1024},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8763a12b2bbd4a93a75aff182afb95dc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "50f6c0e4",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"from langchain_xinference.chat_models import ChatXinference\n",
|
||||
"\n",
|
||||
"llm = ChatXinference(\n",
|
||||
" server_url=\"your_server_url\", model_uid=\"7167b2b0-2a04-11ee-83f0-d29396a3f064\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"system_message = \"You are a helpful assistant that translates English to French. Translate the user sentence.\"\n",
|
||||
"human_message = \"I love programming.\"\n",
|
||||
"\n",
|
||||
"llm.invoke([HumanMessage(content=human_message), SystemMessage(content=system_message)])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7623eae2785240b9bd12b16a66d81610",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "317d05c6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain_xinference.chat_models import ChatXinference\n",
|
||||
"\n",
|
||||
"prompt = PromptTemplate(\n",
|
||||
" input=[\"country\"], template=\"Q: where can we visit in the capital of {country}? A:\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"llm = ChatXinference(\n",
|
||||
" server_url=\"your_server_url\", model_uid=\"7167b2b0-2a04-11ee-83f0-d29396a3f064\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"chain.invoke(input={\"country\": \"France\"})\n",
|
||||
"chain.stream(input={\"country\": \"France\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "66f7fb26",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all ChatXinference features and configurations head to the API reference: https://github.com/TheSongg/langchain-xinference"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
265
docs/docs/integrations/document_loaders/agentql.ipynb
Normal file
265
docs/docs/integrations/document_loaders/agentql.ipynb
Normal file
@@ -0,0 +1,265 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "wkUAAcGZNSJ3"
|
||||
},
|
||||
"source": [
|
||||
"# AgentQLLoader\n",
|
||||
"\n",
|
||||
"[AgentQL](https://www.agentql.com/)'s document loader provides structured data extraction from any web page using an [AgentQL query](https://docs.agentql.com/agentql-query). AgentQL can be used across multiple languages and web pages without breaking over time and change.\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"`AgentQLLoader` requires the following two parameters:\n",
|
||||
"- `url`: The URL of the web page you want to extract data from.\n",
|
||||
"- `query`: The AgentQL query to execute. Learn more about [how to write an AgentQL query in the docs](https://docs.agentql.com/agentql-query) or test one out in the [AgentQL Playground](https://dev.agentql.com/playground).\n",
|
||||
"\n",
|
||||
"Setting the following parameters are optional:\n",
|
||||
"- `api_key`: Your AgentQL API key from [dev.agentql.com](https://dev.agentql.com). **`Optional`.**\n",
|
||||
"- `timeout`: The number of seconds to wait for a request before timing out. **Defaults to `900`.**\n",
|
||||
"- `is_stealth_mode_enabled`: Whether to enable experimental anti-bot evasion strategies. This feature may not work for all websites at all times. Data extraction may take longer to complete with this mode enabled. **Defaults to `False`.**\n",
|
||||
"- `wait_for`: The number of seconds to wait for the page to load before extracting data. **Defaults to `0`.**\n",
|
||||
"- `is_scroll_to_bottom_enabled`: Whether to scroll to bottom of the page before extracting data. **Defaults to `False`.**\n",
|
||||
"- `mode`: `\"standard\"` uses deep data analysis, while `\"fast\"` trades some depth of analysis for speed and is adequate for most usecases. [Learn more about the modes in this guide.](https://docs.agentql.com/accuracy/standard-mode) **Defaults to `\"fast\"`.**\n",
|
||||
"- `is_screenshot_enabled`: Whether to take a screenshot before extracting data. Returned in 'metadata' as a Base64 string. **Defaults to `False`.**\n",
|
||||
"\n",
|
||||
"AgentQLLoader is implemented with AgentQL's [REST API](https://docs.agentql.com/rest-api/api-reference)\n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | JS support |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: |\n",
|
||||
"| AgentQLLoader| langchain-agentql | ✅ | ❌ | ❌ |\n",
|
||||
"\n",
|
||||
"### Loader features\n",
|
||||
"| Source | Document Lazy Loading | Native Async Support\n",
|
||||
"| :---: | :---: | :---: |\n",
|
||||
"| AgentQLLoader | ✅ | ❌ |"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "CaKa2QrnwPXq"
|
||||
},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To use the AgentQL Document Loader, you will need to configure the `AGENTQL_API_KEY` environment variable, or use the `api_key` parameter. You can acquire an API key from our [Dev Portal](https://dev.agentql.com)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "mZNJvUQBNSJ5"
|
||||
},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"Install **langchain-agentql**."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "IblRoJJDNSJ5"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain_agentql"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "SNsUT60YvfCm"
|
||||
},
|
||||
"source": [
|
||||
"### Set Credentials"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"id": "2D1EN7Egvk1c"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"AGENTQL_API_KEY\"] = \"YOUR_AGENTQL_API_KEY\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "D4hnJV_6NSJ5"
|
||||
},
|
||||
"source": [
|
||||
"## Initialization\n",
|
||||
"\n",
|
||||
"Next instantiate your model object:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"id": "oMJdxL_KNSJ5"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_agentql.document_loaders import AgentQLLoader\n",
|
||||
"\n",
|
||||
"loader = AgentQLLoader(\n",
|
||||
" url=\"https://www.agentql.com/blog\",\n",
|
||||
" query=\"\"\"\n",
|
||||
" {\n",
|
||||
" posts[] {\n",
|
||||
" title\n",
|
||||
" url\n",
|
||||
" date\n",
|
||||
" author\n",
|
||||
" }\n",
|
||||
" }\n",
|
||||
" \"\"\",\n",
|
||||
" is_scroll_to_bottom_enabled=True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "SRxIOx90NSJ5"
|
||||
},
|
||||
"source": [
|
||||
"## Load"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"id": "bNnnCZ1oNSJ5",
|
||||
"outputId": "d0eb8cb4-9742-4f0c-80f1-0509a3af1808"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Document(metadata={'request_id': 'bdb9dbe7-8a7f-427f-bc16-839ccc02cae6', 'generated_query': None, 'screenshot': None}, page_content=\"{'posts': [{'title': 'Launch Week Recap—make the web AI-ready', 'url': 'https://www.agentql.com/blog/2024-launch-week-recap', 'date': 'Nov 18, 2024', 'author': 'Rachel-Lee Nabors'}, {'title': 'Accurate data extraction from PDFs and images with AgentQL', 'url': 'https://www.agentql.com/blog/accurate-data-extraction-pdfs-images', 'date': 'Feb 1, 2025', 'author': 'Rachel-Lee Nabors'}, {'title': 'Introducing Scheduled Scraping Workflows', 'url': 'https://www.agentql.com/blog/scheduling', 'date': 'Dec 2, 2024', 'author': 'Rachel-Lee Nabors'}, {'title': 'Updates to Our Pricing Model', 'url': 'https://www.agentql.com/blog/2024-pricing-update', 'date': 'Nov 19, 2024', 'author': 'Rachel-Lee Nabors'}, {'title': 'Get data from any page: AgentQL’s REST API Endpoint—Launch week day 5', 'url': 'https://www.agentql.com/blog/data-rest-api', 'date': 'Nov 15, 2024', 'author': 'Rachel-Lee Nabors'}]}\")"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs = loader.load()\n",
|
||||
"docs[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"id": "wtPMNh72NSJ5",
|
||||
"outputId": "59d529a4-3c22-445c-f5cf-dc7b24168906"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'request_id': 'bdb9dbe7-8a7f-427f-bc16-839ccc02cae6', 'generated_query': None, 'screenshot': None}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(docs[0].metadata)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "7RMuEwl4NSJ5"
|
||||
},
|
||||
"source": [
|
||||
"## Lazy Load\n",
|
||||
"\n",
|
||||
"`AgentQLLoader` currently only loads one `Document` at a time. Therefore, `load()` and `lazy_load()` behave the same:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"id": "FIYddZBONSJ5",
|
||||
"outputId": "c39a7a6d-bc52-4ef9-b36f-e1d138590b79"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(metadata={'request_id': '06273abd-b2ef-4e15-b0ec-901cba7b4825', 'generated_query': None, 'screenshot': None}, page_content=\"{'posts': [{'title': 'Launch Week Recap—make the web AI-ready', 'url': 'https://www.agentql.com/blog/2024-launch-week-recap', 'date': 'Nov 18, 2024', 'author': 'Rachel-Lee Nabors'}, {'title': 'Accurate data extraction from PDFs and images with AgentQL', 'url': 'https://www.agentql.com/blog/accurate-data-extraction-pdfs-images', 'date': 'Feb 1, 2025', 'author': 'Rachel-Lee Nabors'}, {'title': 'Introducing Scheduled Scraping Workflows', 'url': 'https://www.agentql.com/blog/scheduling', 'date': 'Dec 2, 2024', 'author': 'Rachel-Lee Nabors'}, {'title': 'Updates to Our Pricing Model', 'url': 'https://www.agentql.com/blog/2024-pricing-update', 'date': 'Nov 19, 2024', 'author': 'Rachel-Lee Nabors'}, {'title': 'Get data from any page: AgentQL’s REST API Endpoint—Launch week day 5', 'url': 'https://www.agentql.com/blog/data-rest-api', 'date': 'Nov 15, 2024', 'author': 'Rachel-Lee Nabors'}]}\")]"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"pages = [doc for doc in loader.lazy_load()]\n",
|
||||
"pages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For more information on how to use this integration, please refer to the [git repo](https://github.com/tinyfish-io/agentql-integrations/tree/main/langchain) or the [langchain integration documentation](https://docs.agentql.com/integrations/langchain)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": []
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
||||
@@ -521,9 +521,9 @@
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"- [LangChain Docling integration GitHub](https://github.com/DS4SD/docling-langchain)\n",
|
||||
"- [Docling GitHub](https://github.com/DS4SD/docling)\n",
|
||||
"- [Docling docs](https://ds4sd.github.io/docling/)"
|
||||
"- [LangChain Docling integration GitHub](https://github.com/docling-project/docling-langchain)\n",
|
||||
"- [Docling GitHub](https://github.com/docling-project/docling)\n",
|
||||
"- [Docling docs](https://docling-project.github.io/docling//)"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -68,7 +68,7 @@
|
||||
"id": "92a22c77f03d43dc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"It's also helpful (but not needed) to set up [LangSmith](https://smith.langchain.com/) for best-in-class observability. If you wish to do so, you can set the `LANGCHAIN_TRACING_V2` and `LANGCHAIN_API_KEY` environment variables:"
|
||||
"It's also helpful (but not needed) to set up [LangSmith](https://smith.langchain.com/) for best-in-class observability. If you wish to do so, you can set the `LANGSMITH_TRACING` and `LANGSMITH_API_KEY` environment variables:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -76,8 +76,8 @@
|
||||
"id": "98d8422ecee77403",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()"
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass()"
|
||||
],
|
||||
"outputs": [],
|
||||
"execution_count": null
|
||||
|
||||
244
docs/docs/integrations/document_loaders/powerscale.ipynb
Normal file
244
docs/docs/integrations/document_loaders/powerscale.ipynb
Normal file
@@ -0,0 +1,244 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Dell PowerScale Document Loader\n",
|
||||
"\n",
|
||||
"[Dell PowerScale](https://www.dell.com/en-us/shop/powerscale-family/sf/powerscale) is an enterprise scale out storage system that hosts industry leading OneFS filesystem that can be hosted on-prem or deployed in the cloud.\n",
|
||||
"\n",
|
||||
"This document loader utilizes unique capabilities from PowerScale that can determine what files that have been modified since an application's last run and only returns modified files for processing. This will eliminate the need to re-process (chunk and embed) files that have not been changed, improving the overall data ingestion workflow.\n",
|
||||
"\n",
|
||||
"This loader requires PowerScale's MetadataIQ feature enabled. Additional information can be found on our GitHub Repo: [https://github.com/dell/powerscale-rag-connector](https://github.com/dell/powerscale-rag-connector)\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | JS support |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: |\n",
|
||||
"| [PowerScaleDocumentLoader](https://github.com/dell/powerscale-rag-connector/blob/main/src/powerscale_rag_connector/PowerScaleDocumentLoader.py) | [powerscale-rag-connector](https://github.com/dell/powerscale-rag-connector) | ✅ | ❌ | ❌ | \n",
|
||||
"| [PowerScaleUnstructuredLoader](https://github.com/dell/powerscale-rag-connector/blob/main/src/powerscale_rag_connector/PowerScaleUnstructuredLoader.py) | [powerscale-rag-connector](https://github.com/dell/powerscale-rag-connector) | ✅ | ❌ | ❌ | \n",
|
||||
"### Loader features\n",
|
||||
"| Source | Document Lazy Loading | Native Async Support\n",
|
||||
"| :---: | :---: | :---: | \n",
|
||||
"| PowerScaleDocumentLoader | ✅ | ✅ | \n",
|
||||
"| PowerScaleUnstructuredLoader | ✅ | ✅ | \n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"This document loader requires the use of a Dell PowerScale system with MetadataIQ enabled. Additional information can be found on our github page: [https://github.com/dell/powerscale-rag-connector](https://github.com/dell/powerscale-rag-connector)\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The document loader lives in an external pip package and can be installed using standard tooling"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet powerscale-rag-connector"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialization\n",
|
||||
"\n",
|
||||
"Now we can instantiate document loader:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Generic Document Loader\n",
|
||||
"\n",
|
||||
"Our generic document loader can be used to incrementally load all files from PowerScale in the following manner:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from powerscale_rag_connector import PowerScaleDocumentLoader\n",
|
||||
"\n",
|
||||
"loader = PowerScaleDocumentLoader(\n",
|
||||
" es_host_url=\"http://elasticsearch:9200\",\n",
|
||||
" es_index_name=\"metadataiq\",\n",
|
||||
" es_api_key=\"your-api-key\",\n",
|
||||
" folder_path=\"/ifs/data\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### UnstructuredLoader Loader\n",
|
||||
"\n",
|
||||
"Optionally, the `PowerScaleUnstructuredLoader` can be used to locate the changed files _and_ automatically process the files producing elements of the source file. This is done using LangChain's `UnstructuredLoader` class."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from powerscale_rag_connector import PowerScaleUnstructuredLoader\n",
|
||||
"\n",
|
||||
"# Or load files with the Unstructured Loader\n",
|
||||
"loader = PowerScaleUnstructuredLoader(\n",
|
||||
" es_host_url=\"http://elasticsearch:9200\",\n",
|
||||
" es_index_name=\"metadataiq\",\n",
|
||||
" es_api_key=\"your-api-key\",\n",
|
||||
" folder_path=\"/ifs/data\",\n",
|
||||
" # 'elements' mode splits the document into more granular chunks\n",
|
||||
" # Use 'single' mode if you want the entire document as a single chunk\n",
|
||||
" mode=\"elements\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The fields:\n",
|
||||
" - `es_host_url` is the endpoint of the MetadataIQ Elasticsearch database\n",
|
||||
" - `es_index_name` is the name of the index where PowerScale writes its file system metadata\n",
|
||||
" - `es_api_key` is the **encoded** version of your elasticsearch API key\n",
|
||||
" - `folder_path` is the path on PowerScale to be queried for changes"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load\n",
|
||||
"\n",
|
||||
"Internally, all code is asynchronous with PowerScale and MetadataIQ and the load and lazy load methods will return a python generator. We recommend using the lazy load function."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='' metadata={'source': '/ifs/pdfs/1994-Graph.Theoretic.Obstacles.to.Perfect.Hashing.TR0257.pdf', 'snapshot': 20834, 'change_types': ['ENTRY_ADDED']}),\n",
|
||||
"Document(page_content='' metadata={'source': '/ifs/pdfs/New.sendfile-FreeBSD.20.Feb.2015.pdf', 'snapshot': 20920, 'change_types': ['ENTRY_MODIFIED']}),\n",
|
||||
"Document(page_content='' metadata={'source': '/ifs/pdfs/FAST-Fast.Architecture.Sensitive.Tree.Search.on.Modern.CPUs.and.GPUs-Slides.pdf', 'snapshot': 20924, 'change_types': ['ENTRY_ADDED']})]\n"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for doc in loader.load():\n",
|
||||
" print(doc)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Returned Object\n",
|
||||
"\n",
|
||||
"Both document loaders will keep track of what files were previously returned to your application. When called again, the document loader will only return new or modified files since your previous run.\n",
|
||||
"\n",
|
||||
" - The `metadata` fields in the returned `Document` will return the path on PowerScale that contains the modified file. You will use this path to read the data via NFS (or S3) and process the data in your application (e.g.: create chunks and embedding). \n",
|
||||
" - The `source` field is the path on PowerScale and not necessarily on your local system (depending on your mount strategy); OneFS expresses the entire storage system as a single tree rooted at `/ifs`.\n",
|
||||
" - The `change_types` property will inform you on what change occurred since the last one - e.g.: new, modified or delete.\n",
|
||||
"\n",
|
||||
"Your RAG application can use the information from `change_types` to add, update or delete entries your chunk and vector store.\n",
|
||||
"\n",
|
||||
"When using `PowerScaleUnstructuredLoader` the `page_content` field will be filled with data from the Unstructured Loader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Lazy Load\n",
|
||||
"\n",
|
||||
"Internally, all code is asynchronous with PowerScale and MetadataIQ and the load and lazy load methods will return a python generator. We recommend using the lazy load function."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for doc in loader.lazy_load():\n",
|
||||
" print(doc) # do something specific with the document"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The same `Document` is returned as the load function with all the same properties mentioned above."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Additional Examples\n",
|
||||
"\n",
|
||||
"Additional examples and code can be found on our public github webpage: [https://github.com/dell/powerscale-rag-connector/tree/main/examples](https://github.com/dell/powerscale-rag-connector/tree/main/examples) that provide full working examples. \n",
|
||||
"\n",
|
||||
" - [PowerScale LangChain Document Loader](https://github.com/dell/powerscale-rag-connector/blob/main/examples/powerscale_langchain_doc_loader.py) - Working example of our standard document loader\n",
|
||||
" - [PowerScale LangChain Unstructured Loader](https://github.com/dell/powerscale-rag-connector/blob/main/examples/powerscale_langchain_unstructured_loader.py) - Working example of our standard document loader using unstructured loader for chunking and embedding\n",
|
||||
" - [PowerScale NVIDIA Retriever Microservice Loader](https://github.com/dell/powerscale-rag-connector/blob/main/examples/powerscale_nvingest_example.py) - Working example of our document loader with NVIDIA NeMo Retriever microservices for chunking and embedding"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all PowerScale Document Loader features and configurations head to the github page: [https://github.com/dell/powerscale-rag-connector/](https://github.com/dell/powerscale-rag-connector/)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -546,7 +546,7 @@
|
||||
"id": "ud_cnGszb1i9"
|
||||
},
|
||||
"source": [
|
||||
"Let's inspect a couple of reranked documents. We observe that the retriever still returns the relevant Langchain type [documents](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html) but as part of the metadata field, we also recieve the `relevance_score` from the Ranking API."
|
||||
"Let's inspect a couple of reranked documents. We observe that the retriever still returns the relevant Langchain type [documents](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html) but as part of the metadata field, we also receive the `relevance_score` from the Ranking API."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -64,8 +64,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
82
docs/docs/integrations/providers/ads4gpts.mdx
Normal file
82
docs/docs/integrations/providers/ads4gpts.mdx
Normal file
@@ -0,0 +1,82 @@
|
||||
# ADS4GPTs
|
||||
|
||||
> [ADS4GPTs](https://www.ads4gpts.com/) is building the open monetization backbone of the AI-Native internet. It helps AI applications monetize through advertising with a UX and Privacy first approach.
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
### Using pip
|
||||
You can install the package directly from PyPI:
|
||||
|
||||
```bash
|
||||
pip install ads4gpts-langchain
|
||||
```
|
||||
|
||||
### From Source
|
||||
Alternatively, install from source:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/ADS4GPTs/ads4gpts.git
|
||||
cd ads4gpts/libs/python-sdk/ads4gpts-langchain
|
||||
pip install .
|
||||
```
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Python 3.11+
|
||||
- ADS4GPTs API Key ([Obtain API Key](https://www.ads4gpts.com))
|
||||
|
||||
## Environment Variables
|
||||
Set the following environment variables for API authentication:
|
||||
|
||||
```bash
|
||||
export ADS4GPTS_API_KEY='your-ads4gpts-api-key'
|
||||
```
|
||||
|
||||
Alternatively, API keys can be passed directly when initializing classes or stored in a `.env` file.
|
||||
|
||||
## Tools
|
||||
|
||||
ADS4GPTs provides two main tools for monetization:
|
||||
|
||||
### Ads4gptsInlineSponsoredResponseTool
|
||||
This tool fetches native, sponsored responses that can be seamlessly integrated within your AI application's outputs.
|
||||
|
||||
```python
|
||||
from ads4gpts_langchain import Ads4gptsInlineSponsoredResponseTool
|
||||
```
|
||||
|
||||
### Ads4gptsSuggestedPromptTool
|
||||
Generates sponsored prompt suggestions to enhance user engagement and provide monetization opportunities.
|
||||
|
||||
```python
|
||||
from ads4gpts_langchain import Ads4gptsSuggestedPromptTool
|
||||
```
|
||||
### Ads4gptsInlineConversationalTool
|
||||
Delivers conversational sponsored content that naturally fits within chat interfaces and dialogs.
|
||||
|
||||
```python
|
||||
from ads4gpts_langchain import Ads4gptsInlineConversationalTool
|
||||
```
|
||||
|
||||
### Ads4gptsInlineBannerTool
|
||||
Provides inline banner advertisements that can be displayed within your AI application's response.
|
||||
|
||||
```python
|
||||
from ads4gpts_langchain import Ads4gptsInlineBannerTool
|
||||
```
|
||||
|
||||
### Ads4gptsSuggestedBannerTool
|
||||
Generates banner advertisement suggestions that can be presented to users as recommended content.
|
||||
|
||||
```python
|
||||
from ads4gpts_langchain import Ads4gptsSuggestedBannerTool
|
||||
```
|
||||
|
||||
## Toolkit
|
||||
|
||||
The `Ads4gptsToolkit` combines these tools for convenient access in LangChain applications.
|
||||
|
||||
```python
|
||||
from ads4gpts_langchain import Ads4gptsToolkit
|
||||
```
|
||||
|
||||
35
docs/docs/integrations/providers/agentql.mdx
Normal file
35
docs/docs/integrations/providers/agentql.mdx
Normal file
@@ -0,0 +1,35 @@
|
||||
# AgentQL
|
||||
|
||||
[AgentQL](https://www.agentql.com/) provides web interaction and structured data extraction from any web page using an [AgentQL query](https://docs.agentql.com/agentql-query) or a Natural Language prompt. AgentQL can be used across multiple languages and web pages without breaking over time as pages change.
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
Install the integration package:
|
||||
|
||||
```bash
|
||||
pip install langchain-agentql
|
||||
```
|
||||
|
||||
## API Key
|
||||
|
||||
Get an API Key from our [Dev Portal](https://dev.agentql.com/) and add it to your environment variables:
|
||||
```
|
||||
export AGENTQL_API_KEY="your-api-key-here"
|
||||
```
|
||||
|
||||
## DocumentLoader
|
||||
AgentQL's document loader provides structured data extraction from any web page using an AgentQL query.
|
||||
|
||||
```python
|
||||
from langchain_agentql.document_loaders import AgentQLLoader
|
||||
```
|
||||
See our [document loader documentation and usage example](/docs/integrations/document_loaders/agentql).
|
||||
|
||||
## Tools and Toolkits
|
||||
AgentQL tools provides web interaction and structured data extraction from any web page using an AgentQL query or a Natural Language prompt.
|
||||
|
||||
```python
|
||||
from langchain_agentql.tools import ExtractWebDataTool, ExtractWebDataBrowserTool, GetWebElementBrowserTool
|
||||
from langchain_agentql import AgentQLBrowserToolkit
|
||||
```
|
||||
See our [tools documentation and usage example](/docs/integrations/tools/agentql).
|
||||
@@ -29,7 +29,7 @@ You can use the `ApifyActorsTool` to use Apify Actors with agents.
|
||||
from langchain_apify import ApifyActorsTool
|
||||
```
|
||||
|
||||
See [this notebook](/docs/integrations/tools/apify_actors) for example usage.
|
||||
See [this notebook](/docs/integrations/tools/apify_actors) for example usage and a full example of a tool-calling agent with LangGraph in the [Apify LangGraph agent Actor template](https://apify.com/templates/python-langgraph).
|
||||
|
||||
For more information on how to use this tool, visit [the Apify integration documentation](https://docs.apify.com/platform/integrations/langgraph).
|
||||
|
||||
|
||||
110
docs/docs/integrations/providers/contextual.ipynb
Normal file
110
docs/docs/integrations/providers/contextual.ipynb
Normal file
@@ -0,0 +1,110 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Contextual AI\n",
|
||||
"\n",
|
||||
"Contextual AI is a platform that offers state-of-the-art Retrieval-Augmented Generation (RAG) technology for enterprise applications. Our platform and models help innovative teams build production-ready AI applications that can process millions of pages of documents with exceptional accuracy.\n",
|
||||
"\n",
|
||||
"## Grounded Language Model (GLM)\n",
|
||||
"\n",
|
||||
"The Grounded Language Model (GLM) is specifically engineered to minimize hallucinations in RAG and agentic applications. The GLM achieves:\n",
|
||||
"\n",
|
||||
"- State-of-the-art performance on the FACTS benchmark\n",
|
||||
"- Responses strictly grounded in provided knowledge sources\n",
|
||||
"\n",
|
||||
"## Using Contextual AI with LangChain\n",
|
||||
"\n",
|
||||
"See details [here](/docs/integrations/chat/contextual).\n",
|
||||
"\n",
|
||||
"This integration allows you to easily incorporate Contextual AI's GLM into your LangChain workflows. Whether you're building applications for regulated industries or security-conscious environments, Contextual AI provides the grounded and reliable responses your use cases demand.\n",
|
||||
"\n",
|
||||
"Get started with a free trial today and experience the most grounded language model for enterprise AI applications."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "y8ku6X96sebl"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"According to the information available, there are two types of cats in the world:\n",
|
||||
"\n",
|
||||
"1. Good cats\n",
|
||||
"2. Best cats\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain_contextual import ChatContextual\n",
|
||||
"\n",
|
||||
"# Set credentials\n",
|
||||
"if not os.getenv(\"CONTEXTUAL_AI_API_KEY\"):\n",
|
||||
" os.environ[\"CONTEXTUAL_AI_API_KEY\"] = getpass.getpass(\n",
|
||||
" \"Enter your Contextual API key: \"\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"# initialize Contextual llm\n",
|
||||
"llm = ChatContextual(\n",
|
||||
" model=\"v1\",\n",
|
||||
" api_key=\"\",\n",
|
||||
")\n",
|
||||
"# include a system prompt (optional)\n",
|
||||
"system_prompt = \"You are a helpful assistant that uses all of the provided knowledge to answer the user's query to the best of your ability.\"\n",
|
||||
"\n",
|
||||
"# provide your own knowledge from your knowledge-base here in an array of string\n",
|
||||
"knowledge = [\n",
|
||||
" \"There are 2 types of dogs in the world: good dogs and best dogs.\",\n",
|
||||
" \"There are 2 types of cats in the world: good cats and best cats.\",\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"# create your message\n",
|
||||
"messages = [\n",
|
||||
" (\"human\", \"What type of cats are there in the world and what are the types?\"),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"# invoke the GLM by providing the knowledge strings, optional system prompt\n",
|
||||
"# if you want to turn off the GLM's commentary, pass True to the `avoid_commentary` argument\n",
|
||||
"ai_msg = llm.invoke(\n",
|
||||
" messages, knowledge=knowledge, system_prompt=system_prompt, avoid_commentary=True\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(ai_msg.content)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": []
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 1
|
||||
}
|
||||
22
docs/docs/integrations/providers/dell.mdx
Normal file
22
docs/docs/integrations/providers/dell.mdx
Normal file
@@ -0,0 +1,22 @@
|
||||
# Dell
|
||||
|
||||
Dell is a global technology company that provides a range of hardware, software, and
|
||||
services, including AI solutions. Their AI portfolio includes purpose-built
|
||||
infrastructure for AI workloads, including Dell PowerScale storage systems optimized
|
||||
for AI data management.
|
||||
|
||||
## PowerScale
|
||||
|
||||
Dell [PowerScale](https://www.dell.com/en-us/shop/powerscale-family/sf/powerscale) is
|
||||
an enterprise scale out storage system that hosts industry leading OneFS filesystem
|
||||
that can be hosted on-prem or deployed in the cloud.
|
||||
|
||||
### Installation and Setup
|
||||
|
||||
```bash
|
||||
pip install powerscale-rag-connector
|
||||
```
|
||||
|
||||
### Document loaders
|
||||
|
||||
See detail on available loaders [here](/docs/integrations/document_loaders/powerscale).
|
||||
@@ -171,7 +171,7 @@ print(result)
|
||||
|
||||
|
||||
# Prompt declarations
|
||||
By default the prompt is is the whole function docs, unless you mark your prompt
|
||||
By default the prompt is the whole function docs, unless you mark your prompt
|
||||
|
||||
## Documenting your prompt
|
||||
|
||||
|
||||
@@ -6,15 +6,15 @@ keywords: [openai]
|
||||
|
||||
All functionality related to OpenAI
|
||||
|
||||
>[OpenAI](https://en.wikipedia.org/wiki/OpenAI) is American artificial intelligence (AI) research laboratory
|
||||
> consisting of the non-profit `OpenAI Incorporated`
|
||||
> and its for-profit subsidiary corporation `OpenAI Limited Partnership`.
|
||||
> `OpenAI` conducts AI research with the declared intention of promoting and developing a friendly AI.
|
||||
> `OpenAI` systems run on an `Azure`-based supercomputing platform from `Microsoft`.
|
||||
|
||||
>The [OpenAI API](https://platform.openai.com/docs/models) is powered by a diverse set of models with different capabilities and price points.
|
||||
> [OpenAI](https://en.wikipedia.org/wiki/OpenAI) is an American artificial intelligence (AI) research laboratory
|
||||
> consisting of the non-profit **OpenAI Incorporated**
|
||||
> and its for-profit subsidiary corporation **OpenAI Limited Partnership**.
|
||||
> **OpenAI** conducts AI research with the declared intention of promoting and developing a friendly AI.
|
||||
> **OpenAI** systems run on an **Azure**-based supercomputing platform from **Microsoft**.
|
||||
>
|
||||
> The [OpenAI API](https://platform.openai.com/docs/models) is powered by a diverse set of models with different capabilities and price points.
|
||||
>
|
||||
>[ChatGPT](https://chat.openai.com) is the Artificial Intelligence (AI) chatbot developed by `OpenAI`.
|
||||
> [ChatGPT](https://chat.openai.com) is the Artificial Intelligence (AI) chatbot developed by `OpenAI`.
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
|
||||
138
docs/docs/integrations/providers/opengradient.ipynb
Normal file
138
docs/docs/integrations/providers/opengradient.ipynb
Normal file
@@ -0,0 +1,138 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "raw"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: OpenGradient\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# OpenGradient\n",
|
||||
"[OpenGradient](https://www.opengradient.ai/) is a decentralized AI computing network enabling globally accessible, permissionless, and verifiable ML model inference.\n",
|
||||
"\n",
|
||||
"The OpenGradient langchain package currently offers a toolkit that allows developers to build their own custom ML inference tools for models on the OpenGradient network. This was previously a challenge because of the context-window polluting nature of large model parameters -- imagine having to give your agent a 200x200 array of floating-point data!\n",
|
||||
"\n",
|
||||
"The toolkit solves this problem by encapsulating all data processing logic within the tool definition itself. This approach keeps the agent's context window clean while giving developers complete flexibility to implement custom data processing and live-data retrieval for their ML models."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Installation and Setup\n",
|
||||
"Ensure that you have an OpenGradient API key in order to access the OpenGradient network. If you already have an API key, simply set the environment variable:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "shellscript"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!export OPENGRADIENT_PRIVATE_KEY=\"your-api-key\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you need to set up a new API key, download the opengradient SDK and follow the instructions to initialize a new configuration."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "shellscript"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install opengradient\n",
|
||||
"!opengradient config init"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Once you have set up your API key, install the langchain-opengradient package."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "powershell"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pip install -U langchain-opengradient"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## OpenGradient Toolkit\n",
|
||||
"The OpenGradientToolkit empowers developers to create specialized tools based on [ML models](https://hub.opengradient.ai/models) and [workflows](https://docs.opengradient.ai/developers/sdk/ml_workflows.html) deployed on the OpenGradient decentralized network. This integration enables LangChain agents to access powerful ML capabilities while maintaining efficient context usage.\n",
|
||||
"\n",
|
||||
"### Key Benefits\n",
|
||||
"* 🔄 Real-time data integration - Process live data feeds within your tools\n",
|
||||
"\n",
|
||||
"* 🎯 Dynamic processing - Custom data pipelines that adapt to specific agent inputs\n",
|
||||
"\n",
|
||||
"* 🧠 Context efficiency - Handle complex ML operations without flooding your context window\n",
|
||||
"\n",
|
||||
"* 🔌 Seamless deployment - Easy integration with models already on the OpenGradient network\n",
|
||||
"\n",
|
||||
"* 🔧 Full customization - Create and deploy your own specific models through the [OpenGradient SDK](https://docs.opengradient.ai/developers/sdk/model_management.html), then build custom tools from them\n",
|
||||
"\n",
|
||||
"* 🔐 Verifiable inference - All inferences run on the decentralized OpenGradient network, allowing users to choose various flavors of security such as ZKML and TEE for trustless, verifiable model execution\n",
|
||||
"\n",
|
||||
"For detailed examples and implementation guides, check out our [comprehensive tutorial](/docs/integrations/tools/opengradient_toolkit.ipynb)."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": []
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.11"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 1
|
||||
}
|
||||
@@ -4,7 +4,7 @@ SWI-Prolog offers a comprehensive free Prolog environment.
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
Install lanchain-prolog using pip:
|
||||
Once SWI-Prolog has been installed, install langchain-prolog using pip:
|
||||
```bash
|
||||
pip install langchain-prolog
|
||||
```
|
||||
|
||||
15
docs/docs/integrations/providers/tableau.mdx
Normal file
15
docs/docs/integrations/providers/tableau.mdx
Normal file
@@ -0,0 +1,15 @@
|
||||
# Tableau
|
||||
|
||||
[Tableau](https://www.tableau.com/) is an analytics platform that enables anyone to
|
||||
see and understand data.
|
||||
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
```bash
|
||||
pip install langchain-tableau
|
||||
```
|
||||
|
||||
## Tools
|
||||
|
||||
See detail on available tools [here](/docs/integrations/tools/tableau).
|
||||
49
docs/docs/integrations/providers/taiga.mdx
Normal file
49
docs/docs/integrations/providers/taiga.mdx
Normal file
@@ -0,0 +1,49 @@
|
||||
# Taiga
|
||||
|
||||
> [Taiga](https://docs.taiga.io/) is an open-source project management platform designed for agile teams, offering features like Kanban, Scrum, and issue tracking.
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
Install the `langchain-taiga` package:
|
||||
|
||||
```bash
|
||||
pip install langchain-taiga
|
||||
```
|
||||
|
||||
You must provide login credentials via environment variables so the tools can authenticate.
|
||||
|
||||
```bash
|
||||
export TAIGA_URL="https://taiga.xyz.org/"
|
||||
export TAIGA_API_URL="https://taiga.xyz.org/"
|
||||
export TAIGA_USERNAME="username"
|
||||
export TAIGA_PASSWORD="pw"
|
||||
export OPENAI_API_KEY="OPENAI_API_KEY"
|
||||
```
|
||||
|
||||
|
||||
---
|
||||
|
||||
## Tools
|
||||
|
||||
See a [usage example](/docs/integrations/tools/taiga)
|
||||
|
||||
---
|
||||
|
||||
## Toolkit
|
||||
|
||||
`TaigaToolkit` groups multiple Taiga-related tools into a single interface.
|
||||
|
||||
```python
|
||||
from langchain_taiga.toolkits import TaigaToolkit
|
||||
|
||||
toolkit = TaigaToolkit()
|
||||
tools = toolkit.get_tools()
|
||||
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Future Integrations
|
||||
|
||||
|
||||
Check the [Taiga Developer Docs](https://docs.taiga.io/) for more information, and watch for updates or advanced usage examples in the [langchain_taiga GitHub repo](https://github.com/Shikenso-Analytics/langchain-taiga).
|
||||
18
docs/docs/integrations/providers/tavily.mdx
Normal file
18
docs/docs/integrations/providers/tavily.mdx
Normal file
@@ -0,0 +1,18 @@
|
||||
# Tavily
|
||||
|
||||
[Tavily](https://tavily.com) is a search engine specifically designed for AI agents.
|
||||
Tavily provides both a search and an extract API, so AI developers can effortlessly integrate their
|
||||
applications with realtime online information. Tavily’s primary mission is to provide factual
|
||||
and reliable information from trusted sources, enhancing the accuracy and reliability of AI
|
||||
generated content and reasoning.
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
```bash
|
||||
pip install langchain-tavily
|
||||
```
|
||||
|
||||
## Tools
|
||||
|
||||
See detail on available tools [tavily_search](/docs/integrations/tools/tavily_search)
|
||||
and [tavily_extract](/docs/integrations/tools/tavily_extract).
|
||||
63
docs/docs/integrations/providers/valthera.mdx
Normal file
63
docs/docs/integrations/providers/valthera.mdx
Normal file
@@ -0,0 +1,63 @@
|
||||
# Valthera
|
||||
|
||||
> [Valthera](https://github.com/valthera/valthera) is an open-source framework that empowers LLM Agents to drive meaningful, context-aware user engagement. It evaluates user motivation and ability in real time, ensuring that notifications and actions are triggered only when users are most receptive.
|
||||
>
|
||||
> **langchain-valthera** integrates Valthera with LangChain, enabling developers to build smarter, behavior-driven engagement systems that deliver personalized interactions.
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
### Install langchain-valthera
|
||||
|
||||
Install the LangChain Valthera package via pip:
|
||||
|
||||
```bash
|
||||
pip install -U langchain-valthera
|
||||
```
|
||||
|
||||
Import the ValtheraTool:
|
||||
|
||||
```python
|
||||
from langchain_valthera.tools import ValtheraTool
|
||||
```
|
||||
|
||||
### Example: Initializing the ValtheraTool for LangChain
|
||||
|
||||
This example shows how to initialize the ValtheraTool using a `DataAggregator` and configuration for motivation and ability scoring.
|
||||
|
||||
```python
|
||||
import os
|
||||
from langchain_openai import ChatOpenAI
|
||||
from valthera.aggregator import DataAggregator
|
||||
from mocks import hubspot, posthog, snowflake # Replace these with your actual connector implementations
|
||||
from langchain_valthera.tools import ValtheraTool
|
||||
|
||||
# Initialize the DataAggregator with your data connectors
|
||||
data_aggregator = DataAggregator(
|
||||
connectors={
|
||||
"hubspot": hubspot(),
|
||||
"posthog": posthog(),
|
||||
"app_db": snowflake()
|
||||
}
|
||||
)
|
||||
|
||||
# Initialize the ValtheraTool with your scoring configurations
|
||||
valthera_tool = ValtheraTool(
|
||||
data_aggregator=data_aggregator,
|
||||
motivation_config=[
|
||||
{"key": "hubspot_lead_score", "weight": 0.30, "transform": lambda x: min(x, 100) / 100.0},
|
||||
{"key": "posthog_events_count_past_30days", "weight": 0.30, "transform": lambda x: min(x, 50) / 50.0},
|
||||
{"key": "hubspot_marketing_emails_opened", "weight": 0.20, "transform": lambda x: min(x / 10.0, 1.0)},
|
||||
{"key": "posthog_session_count", "weight": 0.20, "transform": lambda x: min(x / 5.0, 1.0)}
|
||||
],
|
||||
ability_config=[
|
||||
{"key": "posthog_onboarding_steps_completed", "weight": 0.30, "transform": lambda x: min(x / 5.0, 1.0)},
|
||||
{"key": "posthog_session_count", "weight": 0.30, "transform": lambda x: min(x / 10.0, 1.0)},
|
||||
{"key": "behavior_complexity", "weight": 0.40, "transform": lambda x: 1 - (min(x, 5) / 5.0)}
|
||||
]
|
||||
)
|
||||
|
||||
print("✅ ValtheraTool successfully initialized for LangChain integration!")
|
||||
```
|
||||
|
||||
|
||||
The langchain-valthera integration allows you to assess user behavior and decide on the best course of action for engagement, ensuring that interactions are both timely and relevant within your LangChain applications.
|
||||
@@ -99,4 +99,23 @@ For more information and detailed examples, refer to the
|
||||
|
||||
Xinference also supports embedding queries and documents. See
|
||||
[example for xinference embeddings](/docs/integrations/text_embedding/xinference)
|
||||
for a more detailed demo.
|
||||
for a more detailed demo.
|
||||
|
||||
|
||||
### Xinference LangChain partner package install
|
||||
Install the integration package with:
|
||||
```bash
|
||||
pip install langchain-xinference
|
||||
```
|
||||
## Chat Models
|
||||
|
||||
```python
|
||||
from langchain_xinference.chat_models import ChatXinference
|
||||
```
|
||||
|
||||
## LLM
|
||||
|
||||
```python
|
||||
from langchain_xinference.llms import Xinference
|
||||
```
|
||||
|
||||
|
||||
@@ -99,7 +99,7 @@
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='This walkthrough demonstrates how to use an agent optimized for conversation. Other agents are often optimized for using tools to figure out the best response, which is not ideal in a conversational setting where you may want the agent to be able to chat with the user as well.\\n\\nIf we compare it to the standard ReAct agent, the main difference is the prompt. We want it to be much more conversational.\\n\\nfrom langchain.agents import AgentType, Tool, initialize_agent\\n\\nfrom langchain_openai import OpenAI\\n\\nfrom langchain.memory import ConversationBufferMemory\\n\\nfrom langchain_community.utilities import SerpAPIWrapper\\n\\nsearch = SerpAPIWrapper() tools = \\\\[ Tool( name=\"Current Search\", func=search.run, description=\"useful for when you need to answer questions about current events or the current state of the world\", ), \\\\]\\n\\n\\\\\\nllm = OpenAI(temperature=0)\\n\\nUsing LCEL\\n\\nWe will first show how to create this agent using LCEL\\n\\nfrom langchain import hub\\n\\nfrom langchain.agents.format_scratchpad import format_log_to_str\\n\\nfrom langchain.agents.output_parsers import ReActSingleInputOutputParser\\n\\nfrom langchain.tools.render import render_text_description\\n\\nprompt = hub.pull(\"hwchase17/react-chat\")\\n\\nprompt = prompt.partial( tools=render_text_description(tools), tool_names=\", \".join(\\\\[[t.name](http://t.name) for t in tools\\\\]), )\\n\\nllm_with_stop = llm.bind(stop=\\\\[\"\\\\nObservation\"\\\\])\\n\\nagent = ( { \"input\": lambda x: x\\\\[\"input\"\\\\], \"agent_scratchpad\": lambda x: format_log_to_str(x\\\\[\"intermediate_steps\"\\\\]), \"chat_history\": lambda x: x\\\\[\"chat_history\"\\\\], } | prompt | llm_with_stop | ReActSingleInputOutputParser() )\\n\\nfrom langchain.agents import AgentExecutor\\n\\nmemory = ConversationBufferMemory(memory_key=\"chat_history\") agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, 
memory=memory)\\n\\nagent_executor.invoke({\"input\": \"hi, i am bob\"})\\\\[\"output\"\\\\]\\n\\n```\\n> Entering new AgentExecutor chain...\\n\\nThought: Do I need to use a tool? No\\nFinal Answer: Hi Bob, nice to meet you! How can I help you today?\\n\\n> Finished chain.\\n```\\n\\n\\\\\\n\\'Hi Bob, nice to meet you! How can I help you today?\\'\\n\\nagent_executor.invoke({\"input\": \"whats my name?\"})\\\\[\"output\"\\\\]\\n\\n```\\n> Entering new AgentExecutor chain...\\n\\nThought: Do I need to use a tool? No\\nFinal Answer: Your name is Bob.\\n\\n> Finished chain.\\n```\\n\\n\\\\\\n\\'Your name is Bob.\\'\\n\\nagent_executor.invoke({\"input\": \"what are some movies showing 9/21/2023?\"})\\\\[\"output\"\\\\]\\n\\n```\\n> Entering new AgentExecutor chain...\\n\\nThought: Do I need to use a tool? Yes\\nAction: Current Search\\nAction Input: Movies showing 9/21/2023[\\'September 2023 Movies: The Creator • Dumb Money • Expend4bles • The Kill Room • The Inventor • The Equalizer 3 • PAW Patrol: The Mighty Movie, ...\\'] Do I need to use a tool? No\\nFinal Answer: According to current search, some movies showing on 9/21/2023 are The Creator, Dumb Money, Expend4bles, The Kill Room, The Inventor, The Equalizer 3, and PAW Patrol: The Mighty Movie.\\n\\n> Finished chain.\\n```\\n\\n\\\\\\n\\'According to current search, some movies showing on 9/21/2023 are The Creator, Dumb Money, Expend4bles, The Kill Room, The Inventor, The Equalizer 3, and PAW Patrol: The Mighty Movie.\\'\\n\\n\\\\\\nUse the off-the-shelf agent\\n\\nWe can also create this agent using the off-the-shelf agent class\\n\\nagent_executor = initialize_agent( tools, llm, agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION, verbose=True, memory=memory, )\\n\\nUse a chat model\\n\\nWe can also use a chat model here. 
The main difference here is in the prompts used.\\n\\nfrom langchain import hub\\n\\nfrom langchain_openai import ChatOpenAI\\n\\nprompt = hub.pull(\"hwchase17/react-chat-json\") chat_model = ChatOpenAI(temperature=0, model=\"gpt-4\")\\n\\nprompt = prompt.partial( tools=render_text_description(tools), tool_names=\", \".join(\\\\[[t.name](http://t.name) for t in tools\\\\]), )\\n\\nchat_model_with_stop = chat_model.bind(stop=\\\\[\"\\\\nObservation\"\\\\])\\n\\nfrom langchain.agents.format_scratchpad import format_log_to_messages\\n\\nfrom langchain.agents.output_parsers import JSONAgentOutputParser\\n\\n# We need some extra steering, or the c', metadata={'title': 'Conversational', 'source': 'https://d01.getoutline.com/doc/conversational-B5dBkUgQ4b'}),\n",
|
||||
" Document(page_content='Quickstart\\n\\nIn this quickstart we\\'ll show you how to:\\n\\nGet setup with LangChain, LangSmith and LangServe\\n\\nUse the most basic and common components of LangChain: prompt templates, models, and output parsers\\n\\nUse LangChain Expression Language, the protocol that LangChain is built on and which facilitates component chaining\\n\\nBuild a simple application with LangChain\\n\\nTrace your application with LangSmith\\n\\nServe your application with LangServe\\n\\nThat\\'s a fair amount to cover! Let\\'s dive in.\\n\\nSetup\\n\\nInstallation\\n\\nTo install LangChain run:\\n\\nPip\\n\\nConda\\n\\npip install langchain\\n\\nFor more details, see our Installation guide.\\n\\nEnvironment\\n\\nUsing LangChain will usually require integrations with one or more model providers, data stores, APIs, etc. For this example, we\\'ll use OpenAI\\'s model APIs.\\n\\nFirst we\\'ll need to install their Python package:\\n\\npip install openai\\n\\nAccessing the API requires an API key, which you can get by creating an account and heading here. Once we have a key we\\'ll want to set it as an environment variable by running:\\n\\nexport OPENAI_API_KEY=\"...\"\\n\\nIf you\\'d prefer not to set an environment variable you can pass the key in directly via the openai_api_key named parameter when initiating the OpenAI LLM class:\\n\\nfrom langchain_openai import ChatOpenAI\\n\\nllm = ChatOpenAI(openai_api_key=\"...\")\\n\\nLangSmith\\n\\nMany of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls. As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent. The best way to do this is with LangSmith.\\n\\nNote that LangSmith is not needed, but it is helpful. 
If you do want to use LangSmith, after you sign up at the link above, make sure to set your environment variables to start logging traces:\\n\\nexport LANGCHAIN_TRACING_V2=\"true\" export LANGCHAIN_API_KEY=...\\n\\nLangServe\\n\\nLangServe helps developers deploy LangChain chains as a REST API. You do not need to use LangServe to use LangChain, but in this guide we\\'ll show how you can deploy your app with LangServe.\\n\\nInstall with:\\n\\npip install \"langserve\\\\[all\\\\]\"\\n\\nBuilding with LangChain\\n\\nLangChain provides many modules that can be used to build language model applications. Modules can be used as standalones in simple applications and they can be composed for more complex use cases. Composition is powered by LangChain Expression Language (LCEL), which defines a unified Runnable interface that many modules implement, making it possible to seamlessly chain components.\\n\\nThe simplest and most common chain contains three things:\\n\\nLLM/Chat Model: The language model is the core reasoning engine here. In order to work with LangChain, you need to understand the different types of language models and how to work with them. Prompt Template: This provides instructions to the language model. This controls what the language model outputs, so understanding how to construct prompts and different prompting strategies is crucial. Output Parser: These translate the raw response from the language model to a more workable format, making it easy to use the output downstream. In this guide we\\'ll cover those three components individually, and then go over how to combine them. Understanding these concepts will set you up well for being able to use and customize LangChain applications. 
Most LangChain applications allow you to configure the model and/or the prompt, so knowing how to take advantage of this will be a big enabler.\\n\\nLLM / Chat Model\\n\\nThere are two types of language models:\\n\\nLLM: underlying model takes a string as input and returns a string\\n\\nChatModel: underlying model takes a list of messages as input and returns a message\\n\\nStrings are simple, but what exactly are messages? The base message interface is defined by BaseMessage, which has two required attributes:\\n\\ncontent: The content of the message. Usually a string. role: The entity from which the BaseMessage is coming. LangChain provides several ob', metadata={'title': 'Quick Start', 'source': 'https://d01.getoutline.com/doc/quick-start-jGuGGGOTuL'}),\n",
|
||||
" Document(page_content='Quickstart\\n\\nIn this quickstart we\\'ll show you how to:\\n\\nGet setup with LangChain, LangSmith and LangServe\\n\\nUse the most basic and common components of LangChain: prompt templates, models, and output parsers\\n\\nUse LangChain Expression Language, the protocol that LangChain is built on and which facilitates component chaining\\n\\nBuild a simple application with LangChain\\n\\nTrace your application with LangSmith\\n\\nServe your application with LangServe\\n\\nThat\\'s a fair amount to cover! Let\\'s dive in.\\n\\nSetup\\n\\nInstallation\\n\\nTo install LangChain run:\\n\\nPip\\n\\nConda\\n\\npip install langchain\\n\\nFor more details, see our Installation guide.\\n\\nEnvironment\\n\\nUsing LangChain will usually require integrations with one or more model providers, data stores, APIs, etc. For this example, we\\'ll use OpenAI\\'s model APIs.\\n\\nFirst we\\'ll need to install their Python package:\\n\\npip install openai\\n\\nAccessing the API requires an API key, which you can get by creating an account and heading here. Once we have a key we\\'ll want to set it as an environment variable by running:\\n\\nexport OPENAI_API_KEY=\"...\"\\n\\nIf you\\'d prefer not to set an environment variable you can pass the key in directly via the openai_api_key named parameter when initiating the OpenAI LLM class:\\n\\nfrom langchain_openai import ChatOpenAI\\n\\nllm = ChatOpenAI(openai_api_key=\"...\")\\n\\nLangSmith\\n\\nMany of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls. As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent. The best way to do this is with LangSmith.\\n\\nNote that LangSmith is not needed, but it is helpful. 
If you do want to use LangSmith, after you sign up at the link above, make sure to set your environment variables to start logging traces:\\n\\nexport LANGSMITH_TRACING=\"true\" export LANGSMITH_API_KEY=...\\n\\nLangServe\\n\\nLangServe helps developers deploy LangChain chains as a REST API. You do not need to use LangServe to use LangChain, but in this guide we\\'ll show how you can deploy your app with LangServe.\\n\\nInstall with:\\n\\npip install \"langserve\\\\[all\\\\]\"\\n\\nBuilding with LangChain\\n\\nLangChain provides many modules that can be used to build language model applications. Modules can be used as standalones in simple applications and they can be composed for more complex use cases. Composition is powered by LangChain Expression Language (LCEL), which defines a unified Runnable interface that many modules implement, making it possible to seamlessly chain components.\\n\\nThe simplest and most common chain contains three things:\\n\\nLLM/Chat Model: The language model is the core reasoning engine here. In order to work with LangChain, you need to understand the different types of language models and how to work with them. Prompt Template: This provides instructions to the language model. This controls what the language model outputs, so understanding how to construct prompts and different prompting strategies is crucial. Output Parser: These translate the raw response from the language model to a more workable format, making it easy to use the output downstream. In this guide we\\'ll cover those three components individually, and then go over how to combine them. Understanding these concepts will set you up well for being able to use and customize LangChain applications. 
Most LangChain applications allow you to configure the model and/or the prompt, so knowing how to take advantage of this will be a big enabler.\\n\\nLLM / Chat Model\\n\\nThere are two types of language models:\\n\\nLLM: underlying model takes a string as input and returns a string\\n\\nChatModel: underlying model takes a list of messages as input and returns a message\\n\\nStrings are simple, but what exactly are messages? The base message interface is defined by BaseMessage, which has two required attributes:\\n\\ncontent: The content of the message. Usually a string. role: The entity from which the BaseMessage is coming. LangChain provides several ob', metadata={'title': 'Quick Start', 'source': 'https://d01.getoutline.com/doc/quick-start-jGuGGGOTuL'}),\n",
|
||||
" Document(page_content='This walkthrough showcases using an agent to implement the [ReAct](https://react-lm.github.io/) logic.\\n\\n```javascript\\nfrom langchain.agents import AgentType, initialize_agent, load_tools\\nfrom langchain_openai import OpenAI\\n```\\n\\nFirst, let\\'s load the language model we\\'re going to use to control the agent.\\n\\n```javascript\\nllm = OpenAI(temperature=0)\\n```\\n\\nNext, let\\'s load some tools to use. Note that the llm-math tool uses an LLM, so we need to pass that in.\\n\\n```javascript\\ntools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)\\n```\\n\\n## Using LCEL[\\u200b](/docs/modules/agents/agent_types/react#using-lcel \"Direct link to Using LCEL\")\\n\\nWe will first show how to create the agent using LCEL\\n\\n```javascript\\nfrom langchain import hub\\nfrom langchain.agents.format_scratchpad import format_log_to_str\\nfrom langchain.agents.output_parsers import ReActSingleInputOutputParser\\nfrom langchain.tools.render import render_text_description\\n```\\n\\n```javascript\\nprompt = hub.pull(\"hwchase17/react\")\\nprompt = prompt.partial(\\n tools=render_text_description(tools),\\n tool_names=\", \".join([t.name for t in tools]),\\n)\\n```\\n\\n```javascript\\nllm_with_stop = llm.bind(stop=[\"\\\\nObservation\"])\\n```\\n\\n```javascript\\nagent = (\\n {\\n \"input\": lambda x: x[\"input\"],\\n \"agent_scratchpad\": lambda x: format_log_to_str(x[\"intermediate_steps\"]),\\n }\\n | prompt\\n | llm_with_stop\\n | ReActSingleInputOutputParser()\\n)\\n```\\n\\n```javascript\\nfrom langchain.agents import AgentExecutor\\n```\\n\\n```javascript\\nagent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)\\n```\\n\\n```javascript\\nagent_executor.invoke(\\n {\\n \"input\": \"Who is Leo DiCaprio\\'s girlfriend? 
What is her current age raised to the 0.43 power?\"\\n }\\n)\\n```\\n\\n```javascript\\n \\n \\n > Entering new AgentExecutor chain...\\n I need to find out who Leo DiCaprio\\'s girlfriend is and then calculate her age raised to the 0.43 power.\\n Action: Search\\n Action Input: \"Leo DiCaprio girlfriend\"model Vittoria Ceretti I need to find out Vittoria Ceretti\\'s age\\n Action: Search\\n Action Input: \"Vittoria Ceretti age\"25 years I need to calculate 25 raised to the 0.43 power\\n Action: Calculator\\n Action Input: 25^0.43Answer: 3.991298452658078 I now know the final answer\\n Final Answer: Leo DiCaprio\\'s girlfriend is Vittoria Ceretti and her current age raised to the 0.43 power is 3.991298452658078.\\n \\n > Finished chain.\\n\\n\\n\\n\\n\\n {\\'input\\': \"Who is Leo DiCaprio\\'s girlfriend? What is her current age raised to the 0.43 power?\",\\n \\'output\\': \"Leo DiCaprio\\'s girlfriend is Vittoria Ceretti and her current age raised to the 0.43 power is 3.991298452658078.\"}\\n```\\n\\n## Using ZeroShotReactAgent[\\u200b](/docs/modules/agents/agent_types/react#using-zeroshotreactagent \"Direct link to Using ZeroShotReactAgent\")\\n\\nWe will now show how to use the agent with an off-the-shelf agent implementation\\n\\n```javascript\\nagent_executor = initialize_agent(\\n tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\\n)\\n```\\n\\n```javascript\\nagent_executor.invoke(\\n {\\n \"input\": \"Who is Leo DiCaprio\\'s girlfriend? 
What is her current age raised to the 0.43 power?\"\\n }\\n)\\n```\\n\\n```javascript\\n \\n \\n > Entering new AgentExecutor chain...\\n I need to find out who Leo DiCaprio\\'s girlfriend is and then calculate her age raised to the 0.43 power.\\n Action: Search\\n Action Input: \"Leo DiCaprio girlfriend\"\\n Observation: model Vittoria Ceretti\\n Thought: I need to find out Vittoria Ceretti\\'s age\\n Action: Search\\n Action Input: \"Vittoria Ceretti age\"\\n Observation: 25 years\\n Thought: I need to calculate 25 raised to the 0.43 power\\n Action: Calculator\\n Action Input: 25^0.43\\n Observation: Answer: 3.991298452658078\\n Thought: I now know the final answer\\n Final Answer: Leo DiCaprio\\'s girlfriend is Vittoria Ceretti and her current age raised to the 0.43 power is 3.991298452658078.\\n \\n > Finished chain.\\n\\n\\n\\n\\n\\n {\\'input\\': \"Who is L', metadata={'title': 'ReAct', 'source': 'https://d01.getoutline.com/doc/react-d6rxRS1MHk'})]"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -66,7 +66,7 @@
|
||||
"id": "92a22c77f03d43dc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"It's also helpful (but not needed) to set up [LangSmith](https://smith.langchain.com/) for best-in-class observability. If you wish to do so, you can set the `LANGCHAIN_TRACING_V2` and `LANGCHAIN_API_KEY` environment variables:"
|
||||
"It's also helpful (but not needed) to set up [LangSmith](https://smith.langchain.com/) for best-in-class observability. If you wish to do so, you can set the `LANGSMITH_TRACING` and `LANGSMITH_API_KEY` environment variables:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -74,8 +74,8 @@
|
||||
"id": "98d8422ecee77403",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()"
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass()"
|
||||
],
|
||||
"outputs": [],
|
||||
"execution_count": null
|
||||
|
||||
@@ -81,8 +81,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
365
docs/docs/integrations/tools/ads4gpts.ipynb
Normal file
365
docs/docs/integrations/tools/ads4gpts.ipynb
Normal file
@@ -0,0 +1,365 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ADS4GPTs\n",
|
||||
"\n",
|
||||
"Integrate AI native advertising into your Agentic application.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Overview"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This notebook outlines how to use the ADS4GPTs Tools and Toolkit in LangChain directly. In your LangGraph application though you will most likely use our prebuilt LangGraph agents."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Install ADS4GPTs Package\n",
|
||||
"Install the ADS4GPTs package using pip."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Install ADS4GPTs Package\n",
|
||||
"# Install the ADS4GPTs package using pip\n",
|
||||
"!pip install ads4gpts-langchain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Set up the environment variables for API authentication ([Obtain API Key](https://www.ads4gpts.com))."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Setup Environment Variables\n",
|
||||
"# Prompt the user to enter their ADS4GPTs API key securely\n",
|
||||
"if not os.environ.get(\"ADS4GPTS_API_KEY\"):\n",
|
||||
" os.environ[\"ADS4GPTS_API_KEY\"] = getpass(\"Enter your ADS4GPTS API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Import the necessary libraries, including ADS4GPTs tools and toolkit.\n",
|
||||
"\n",
|
||||
"Initialize the ADS4GPTs tools such as Ads4gptsInlineSponsoredResponseTool. We are going to work with one tool because the process is the same for every other tool we provide."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Import Required Libraries\n",
|
||||
"\n",
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"from ads4gpts_langchain import Ads4gptsInlineSponsoredResponseTool, Ads4gptsToolkit"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Initialize ADS4GPTs Tools\n",
|
||||
"# Initialize the Ads4gptsInlineSponsoredResponseTool\n",
|
||||
"inline_sponsored_response_tool = Ads4gptsInlineSponsoredResponseTool(\n",
|
||||
" ads4gpts_api_key=os.environ[\"ADS4GPTS_API_KEY\"],\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Toolkit Instantiation\n",
|
||||
"Initialize the Ads4gptsToolkit with the required parameters."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Initialized tool: Ads4gptsInlineSponsoredResponseTool\n",
|
||||
"Initialized tool: Ads4gptsSuggestedPromptTool\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Toolkit Initialization\n",
|
||||
"# Initialize the Ads4gptsToolkit with the required parameters\n",
|
||||
"toolkit = Ads4gptsToolkit(\n",
|
||||
" ads4gpts_api_key=os.environ[\"ADS4GPTS_API_KEY\"],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Retrieve tools from the toolkit\n",
|
||||
"tools = toolkit.get_tools()\n",
|
||||
"\n",
|
||||
"# Print the initialized tools\n",
|
||||
"for tool in tools:\n",
|
||||
" print(f\"Initialized tool: {tool.__class__.__name__}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Run the ADS4GPTs tools with sample inputs and display the results."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Inline Sponsored Response Result: {'ad_text': '<- Promoted Content ->\\n\\nLearn the sartorial ways and get your handmade tailored suit by the masters themselves with Bespoke Tailors. [Subscribe now](https://youtube.com/@bespoketailorsdubai?si=9iH587ujoWKkueFa)\\n\\n<->'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Run ADS4GPTs Tools\n",
|
||||
"# Sample input data for the tools\n",
|
||||
"sample_input = {\n",
|
||||
" \"id\": \"test_id\",\n",
|
||||
" \"user_gender\": \"female\",\n",
|
||||
" \"user_age\": \"25-34\",\n",
|
||||
" \"user_persona\": \"test_persona\",\n",
|
||||
" \"ad_recommendation\": \"test_recommendation\",\n",
|
||||
" \"undesired_ads\": \"test_undesired_ads\",\n",
|
||||
" \"context\": \"test_context\",\n",
|
||||
" \"num_ads\": 1,\n",
|
||||
" \"style\": \"neutral\",\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"# Run Ads4gptsInlineSponsoredResponseTool\n",
|
||||
"inline_sponsored_response_result = inline_sponsored_response_tool._run(\n",
|
||||
" **sample_input, ad_format=\"INLINE_SPONSORED_RESPONSE\"\n",
|
||||
")\n",
|
||||
"print(\"Inline Sponsored Response Result:\", inline_sponsored_response_result)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Async Run ADS4GPTs Tools\n",
|
||||
"Run the ADS4GPTs tools asynchronously with sample inputs and display the results."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Async Inline Sponsored Response Result: {'ad_text': '<- Promoted Content ->\\n\\nGet the best tailoring content from Jonathan Farley. Learn to tie 100 knots and more! [Subscribe now](https://www.youtube.com/channel/UCx5hk4LN3p02jcUt3j_cexQ)\\n\\n<->'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import asyncio\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Define an async function to run the tools asynchronously\n",
|
||||
"async def run_ads4gpts_tools_async():\n",
|
||||
" # Run Ads4gptsInlineSponsoredResponseTool asynchronously\n",
|
||||
" inline_sponsored_response_result = await inline_sponsored_response_tool._arun(\n",
|
||||
" **sample_input, ad_format=\"INLINE_SPONSORED_RESPONSE\"\n",
|
||||
" )\n",
|
||||
" print(\"Async Inline Sponsored Response Result:\", inline_sponsored_response_result)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Toolkit Invocation\n",
|
||||
"Use the Ads4gptsToolkit to get and run tools."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Result from Ads4gptsInlineSponsoredResponseTool: {'ad_text': '<- Promoted Content ->\\n\\nLearn the sartorial ways and get your handmade tailored suit by the masters themselves with Bespoke Tailors. [Subscribe now](https://youtube.com/@bespoketailorsdubai?si=9iH587ujoWKkueFa)\\n\\n<->'}\n",
|
||||
"Async result from Ads4gptsInlineSponsoredResponseTool: {'ad_text': '<- Promoted Content ->\\n\\nGet the best tailoring content from Jonathan Farley. Learn to tie 100 knots and more! [Subscribe now](https://www.youtube.com/channel/UCx5hk4LN3p02jcUt3j_cexQ)\\n\\n<->'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Sample input data for the tools\n",
|
||||
"sample_input = {\n",
|
||||
" \"id\": \"test_id\",\n",
|
||||
" \"user_gender\": \"female\",\n",
|
||||
" \"user_age\": \"25-34\",\n",
|
||||
" \"user_persona\": \"test_persona\",\n",
|
||||
" \"ad_recommendation\": \"test_recommendation\",\n",
|
||||
" \"undesired_ads\": \"test_undesired_ads\",\n",
|
||||
" \"context\": \"test_context\",\n",
|
||||
" \"num_ads\": 1,\n",
|
||||
" \"style\": \"neutral\",\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"# Run one tool and print the result\n",
|
||||
"tool = tools[0]\n",
|
||||
"result = tool._run(**sample_input)\n",
|
||||
"print(f\"Result from {tool.__class__.__name__}:\", result)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Define an async function to run the tools asynchronously\n",
|
||||
"async def run_toolkit_tools_async():\n",
|
||||
" result = await tool._arun(**sample_input)\n",
|
||||
" print(f\"Async result from {tool.__class__.__name__}:\", result)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Execute the async function\n",
|
||||
"await run_toolkit_tools_async()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"if not os.environ.get(\"OPENAI_API_KEY\"):\n",
|
||||
" os.environ[\"OPENAI_API_KEY\"] = getpass(\"Enter your OPENAI_API_KEY API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Tool call: content='' additional_kwargs={'tool_calls': [{'id': 'call_XLR5UjF8JhylVHvrk9mTjhj8', 'function': {'arguments': '{\"id\":\"unique_user_id_001\",\"user_gender\":\"male\",\"user_age\":\"18-24\",\"ad_recommendation\":\"Stylish and trendy clothing suitable for young men going out with friends.\",\"undesired_ads\":\"formal wear, women\\'s clothing, children\\'s clothing\",\"context\":\"A young man looking for clothing to go out with friends\",\"num_ads\":1,\"style\":\"youthful and trendy\",\"ad_format\":\"INLINE_SPONSORED_RESPONSE\"}', 'name': 'ads4gpts_inline_sponsored_response'}, 'type': 'function'}], 'refusal': None} response_metadata={'token_usage': {'completion_tokens': 106, 'prompt_tokens': 1070, 'total_tokens': 1176, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 1024}}, 'model_name': 'gpt-4o-2024-08-06', 'system_fingerprint': 'fp_eb9dce56a8', 'finish_reason': 'tool_calls', 'logprobs': None} id='run-e3e64b4b-4505-4a71-bf02-a8d77bb68eee-0' tool_calls=[{'name': 'ads4gpts_inline_sponsored_response', 'args': {'id': 'unique_user_id_001', 'user_gender': 'male', 'user_age': '18-24', 'ad_recommendation': 'Stylish and trendy clothing suitable for young men going out with friends.', 'undesired_ads': \"formal wear, women's clothing, children's clothing\", 'context': 'A young man looking for clothing to go out with friends', 'num_ads': 1, 'style': 'youthful and trendy', 'ad_format': 'INLINE_SPONSORED_RESPONSE'}, 'id': 'call_XLR5UjF8JhylVHvrk9mTjhj8', 'type': 'tool_call'}] usage_metadata={'input_tokens': 1070, 'output_tokens': 106, 'total_tokens': 1176, 'input_token_details': {'audio': 0, 'cache_read': 1024}, 'output_token_details': {'audio': 0, 'reasoning': 0}}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"openai_model = ChatOpenAI(model=\"gpt-4o\", openai_api_key=os.environ[\"OPENAI_API_KEY\"])\n",
|
||||
"model = openai_model.bind_tools(tools)\n",
|
||||
"model_response = model.invoke(\n",
|
||||
" \"Get me an ad for clothing. I am a young man looking to go out with friends.\"\n",
|
||||
")\n",
|
||||
"print(\"Tool call:\", model_response)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can learn more about ADS4GPTs and the tools at our [GitHub](https://github.com/ADS4GPTs/ads4gpts/tree/main)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "ads4gpts-langraph-agent",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
1077
docs/docs/integrations/tools/agentql.ipynb
Normal file
1077
docs/docs/integrations/tools/agentql.ipynb
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,256 +1,258 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "_9MNj58sIkGN"
|
||||
},
|
||||
"source": [
|
||||
"# Apify Actor\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
">[Apify Actors](https://docs.apify.com/platform/actors) are cloud programs designed for a wide range of web scraping, crawling, and data extraction tasks. These actors facilitate automated data gathering from the web, enabling users to extract, process, and store information efficiently. Actors can be used to perform tasks like scraping e-commerce sites for product details, monitoring price changes, or gathering search engine results. They integrate seamlessly with [Apify Datasets](https://docs.apify.com/platform/storage/dataset), allowing the structured data collected by actors to be stored, managed, and exported in formats like JSON, CSV, or Excel for further analysis or use.\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "OHLF9t9v9HCb"
|
||||
},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"This integration lives in the [langchain-apify](https://pypi.org/project/langchain-apify/) package. The package can be installed using pip.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "4DdGmBn5IbXz"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install langchain-apify"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "rEAwonXqwggR"
|
||||
},
|
||||
"source": [
|
||||
"### Prerequisites\n",
|
||||
"\n",
|
||||
"- **Apify account**: Register your free Apify account [here](https://console.apify.com/sign-up).\n",
|
||||
"- **Apify API token**: Learn how to get your API token in the [Apify documentation](https://docs.apify.com/platform/integrations/api)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "9nJOl4MBMkcR"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"APIFY_API_TOKEN\"] = \"your-apify-api-token\"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = \"your-openai-api-key\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "UfoQxAlCxR9q"
|
||||
},
|
||||
"source": [
|
||||
"## Instantiation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "qG9KtXtLM8i7"
|
||||
},
|
||||
"source": [
|
||||
"Here we instantiate the `ApifyActorsTool` to be able to call [RAG Web Browser](https://apify.com/apify/rag-web-browser) Apify Actor. This Actor provides web browsing functionality for AI and LLM applications, similar to the web browsing feature in ChatGPT. Any Actor from the [Apify Store](https://apify.com/store) can be used in this way."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 43,
|
||||
"metadata": {
|
||||
"id": "cyxeTlPnM4Ya"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_apify import ApifyActorsTool\n",
|
||||
"\n",
|
||||
"tool = ApifyActorsTool(\"apify/rag-web-browser\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "fGDLvDCqyKWO"
|
||||
},
|
||||
"source": [
|
||||
"## Invocation\n",
|
||||
"\n",
|
||||
"The `ApifyActorsTool` takes a single argument, which is `run_input` - a dictionary that is passed as a run input to the Actor. Run input schema documentation can be found in the input section of the Actor details page. See [RAG Web Browser input schema](https://apify.com/apify/rag-web-browser/input-schema).\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "nTWy6Hx1yk04"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tool.invoke({\"run_input\": {\"query\": \"what is apify?\", \"maxResults\": 2}})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "kQsa27hoO58S"
|
||||
},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can provide the created tool to an [agent](https://python.langchain.com/docs/tutorials/agents/). When asked to search for information, the agent will call the Apify Actor, which will search the web, and then retrieve the search results.\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "YySvLskW72Y8"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install langgraph langchain-openai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 44,
|
||||
"metadata": {
|
||||
"id": "QEDz07btO5Gi"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.messages import ToolMessage\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI(model=\"gpt-4o\")\n",
|
||||
"tools = [tool]\n",
|
||||
"graph = create_react_agent(model, tools=tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 45,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"id": "XS1GEyNkQxGu",
|
||||
"outputId": "195273d7-034c-425b-f3f9-95c0a9fb0c9e"
|
||||
},
|
||||
"outputs": [
|
||||
"cells": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"search for what is Apify\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"Tool Calls:\n",
|
||||
" apify_actor_apify_rag-web-browser (call_27mjHLzDzwa5ZaHWCMH510lm)\n",
|
||||
" Call ID: call_27mjHLzDzwa5ZaHWCMH510lm\n",
|
||||
" Args:\n",
|
||||
" run_input: {\"run_input\":{\"query\":\"Apify\",\"maxResults\":3,\"outputFormats\":[\"markdown\"]}}\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"Apify is a comprehensive platform for web scraping, browser automation, and data extraction. It offers a wide array of tools and services that cater to developers and businesses looking to extract data from websites efficiently and effectively. Here's an overview of Apify:\n",
|
||||
"\n",
|
||||
"1. **Ecosystem and Tools**:\n",
|
||||
" - Apify provides an ecosystem where developers can build, deploy, and publish data extraction and web automation tools called Actors.\n",
|
||||
" - The platform supports various use cases such as extracting data from social media platforms, conducting automated browser-based tasks, and more.\n",
|
||||
"\n",
|
||||
"2. **Offerings**:\n",
|
||||
" - Apify offers over 3,000 ready-made scraping tools and code templates.\n",
|
||||
" - Users can also build custom solutions or hire Apify's professional services for more tailored data extraction needs.\n",
|
||||
"\n",
|
||||
"3. **Technology and Integration**:\n",
|
||||
" - The platform supports integration with popular tools and services like Zapier, GitHub, Google Sheets, Pinecone, and more.\n",
|
||||
" - Apify supports open-source tools and technologies such as JavaScript, Python, Puppeteer, Playwright, Selenium, and its own Crawlee library for web crawling and browser automation.\n",
|
||||
"\n",
|
||||
"4. **Community and Learning**:\n",
|
||||
" - Apify hosts a community on Discord where developers can get help and share expertise.\n",
|
||||
" - It offers educational resources through the Web Scraping Academy to help users become proficient in data scraping and automation.\n",
|
||||
"\n",
|
||||
"5. **Enterprise Solutions**:\n",
|
||||
" - Apify provides enterprise-grade web data extraction solutions with high reliability, 99.95% uptime, and compliance with SOC2, GDPR, and CCPA standards.\n",
|
||||
"\n",
|
||||
"For more information, you can visit [Apify's official website](https://apify.com/) or their [GitHub page](https://github.com/apify) which contains their code repositories and further details about their projects.\n"
|
||||
]
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "_9MNj58sIkGN"
|
||||
},
|
||||
"source": [
|
||||
"# Apify Actor\n",
|
||||
"\n",
|
||||
">[Apify Actors](https://docs.apify.com/platform/actors) are cloud programs designed for a wide range of web scraping, crawling, and data extraction tasks. These actors facilitate automated data gathering from the web, enabling users to extract, process, and store information efficiently. Actors can be used to perform tasks like scraping e-commerce sites for product details, monitoring price changes, or gathering search engine results. They integrate seamlessly with [Apify Datasets](https://docs.apify.com/platform/storage/dataset), allowing the structured data collected by actors to be stored, managed, and exported in formats like JSON, CSV, or Excel for further analysis or use.\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"This notebook walks you through using [Apify Actors](https://docs.apify.com/platform/actors) with LangChain to automate web scraping and data extraction. The `langchain-apify` package integrates Apify's cloud-based tools with LangChain agents, enabling efficient data collection and processing for AI applications.\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "OHLF9t9v9HCb"
|
||||
},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"This integration lives in the [langchain-apify](https://pypi.org/project/langchain-apify/) package. The package can be installed using pip.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "4DdGmBn5IbXz"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install langchain-apify"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "rEAwonXqwggR"
|
||||
},
|
||||
"source": [
|
||||
"### Prerequisites\n",
|
||||
"\n",
|
||||
"- **Apify account**: Register your free Apify account [here](https://console.apify.com/sign-up).\n",
|
||||
"- **Apify API token**: Learn how to get your API token in the [Apify documentation](https://docs.apify.com/platform/integrations/api)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "9nJOl4MBMkcR"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"APIFY_API_TOKEN\"] = \"your-apify-api-token\"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = \"your-openai-api-key\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "UfoQxAlCxR9q"
|
||||
},
|
||||
"source": [
|
||||
"## Instantiation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "qG9KtXtLM8i7"
|
||||
},
|
||||
"source": [
|
||||
"Here we instantiate the `ApifyActorsTool` to be able to call [RAG Web Browser](https://apify.com/apify/rag-web-browser) Apify Actor. This Actor provides web browsing functionality for AI and LLM applications, similar to the web browsing feature in ChatGPT. Any Actor from the [Apify Store](https://apify.com/store) can be used in this way."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "cyxeTlPnM4Ya"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_apify import ApifyActorsTool\n",
|
||||
"\n",
|
||||
"tool = ApifyActorsTool(\"apify/rag-web-browser\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "fGDLvDCqyKWO"
|
||||
},
|
||||
"source": [
|
||||
"## Invocation\n",
|
||||
"\n",
|
||||
"The `ApifyActorsTool` takes a single argument, which is `run_input` - a dictionary that is passed as a run input to the Actor. Run input schema documentation can be found in the input section of the Actor details page. See [RAG Web Browser input schema](https://apify.com/apify/rag-web-browser/input-schema).\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "nTWy6Hx1yk04"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tool.invoke({\"run_input\": {\"query\": \"what is apify?\", \"maxResults\": 2}})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "kQsa27hoO58S"
|
||||
},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can provide the created tool to an [agent](https://python.langchain.com/docs/tutorials/agents/). When asked to search for information, the agent will call the Apify Actor, which will search the web, and then retrieve the search results.\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "YySvLskW72Y8"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install langgraph langchain-openai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "QEDz07btO5Gi"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.messages import ToolMessage\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI(model=\"gpt-4o\")\n",
|
||||
"tools = [tool]\n",
|
||||
"graph = create_react_agent(model, tools=tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"id": "XS1GEyNkQxGu",
|
||||
"outputId": "195273d7-034c-425b-f3f9-95c0a9fb0c9e"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"search for what is Apify\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"Tool Calls:\n",
|
||||
" apify_actor_apify_rag-web-browser (call_27mjHLzDzwa5ZaHWCMH510lm)\n",
|
||||
" Call ID: call_27mjHLzDzwa5ZaHWCMH510lm\n",
|
||||
" Args:\n",
|
||||
" run_input: {\"run_input\":{\"query\":\"Apify\",\"maxResults\":3,\"outputFormats\":[\"markdown\"]}}\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"Apify is a comprehensive platform for web scraping, browser automation, and data extraction. It offers a wide array of tools and services that cater to developers and businesses looking to extract data from websites efficiently and effectively. Here's an overview of Apify:\n",
|
||||
"\n",
|
||||
"1. **Ecosystem and Tools**:\n",
|
||||
" - Apify provides an ecosystem where developers can build, deploy, and publish data extraction and web automation tools called Actors.\n",
|
||||
" - The platform supports various use cases such as extracting data from social media platforms, conducting automated browser-based tasks, and more.\n",
|
||||
"\n",
|
||||
"2. **Offerings**:\n",
|
||||
" - Apify offers over 3,000 ready-made scraping tools and code templates.\n",
|
||||
" - Users can also build custom solutions or hire Apify's professional services for more tailored data extraction needs.\n",
|
||||
"\n",
|
||||
"3. **Technology and Integration**:\n",
|
||||
" - The platform supports integration with popular tools and services like Zapier, GitHub, Google Sheets, Pinecone, and more.\n",
|
||||
" - Apify supports open-source tools and technologies such as JavaScript, Python, Puppeteer, Playwright, Selenium, and its own Crawlee library for web crawling and browser automation.\n",
|
||||
"\n",
|
||||
"4. **Community and Learning**:\n",
|
||||
" - Apify hosts a community on Discord where developers can get help and share expertise.\n",
|
||||
" - It offers educational resources through the Web Scraping Academy to help users become proficient in data scraping and automation.\n",
|
||||
"\n",
|
||||
"5. **Enterprise Solutions**:\n",
|
||||
" - Apify provides enterprise-grade web data extraction solutions with high reliability, 99.95% uptime, and compliance with SOC2, GDPR, and CCPA standards.\n",
|
||||
"\n",
|
||||
"For more information, you can visit [Apify's official website](https://apify.com/) or their [GitHub page](https://github.com/apify) which contains their code repositories and further details about their projects.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"inputs = {\"messages\": [(\"user\", \"search for what is Apify\")]}\n",
|
||||
"for s in graph.stream(inputs, stream_mode=\"values\"):\n",
|
||||
" message = s[\"messages\"][-1]\n",
|
||||
" # skip tool messages\n",
|
||||
" if isinstance(message, ToolMessage):\n",
|
||||
" continue\n",
|
||||
" message.pretty_print()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "WYXuQIQx8AvG"
|
||||
},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For more information on how to use this integration, see the [git repository](https://github.com/apify/langchain-apify) or the [Apify integration documentation](https://docs.apify.com/platform/integrations/langgraph)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "f1NnMik78oib"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"toc_visible": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"inputs = {\"messages\": [(\"user\", \"search for what is Apify\")]}\n",
|
||||
"for s in graph.stream(inputs, stream_mode=\"values\"):\n",
|
||||
" message = s[\"messages\"][-1]\n",
|
||||
" # skip tool messages\n",
|
||||
" if isinstance(message, ToolMessage):\n",
|
||||
" continue\n",
|
||||
" message.pretty_print()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "WYXuQIQx8AvG"
|
||||
},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For more information on how to use this integration, see the [git repository](https://github.com/apify/langchain-apify) or the [Apify integration documentation](https://docs.apify.com/platform/integrations/langgraph)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "f1NnMik78oib"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"toc_visible": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
||||
@@ -89,8 +89,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()"
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -59,8 +59,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()"
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
345
docs/docs/integrations/tools/opengradient_toolkit.ipynb
Normal file
345
docs/docs/integrations/tools/opengradient_toolkit.ipynb
Normal file
@@ -0,0 +1,345 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: OpenGradient\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e49f1e0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# OpenGradientToolkit\n",
|
||||
"\n",
|
||||
"This notebook shows how to build tools using the OpenGradient toolkit. This toolkit gives users the ability to create custom tools based on models and workflows on the [OpenGradient network](https://www.opengradient.ai/).\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"Ensure that you have an OpenGradient API key in order to access the OpenGradient network. If you already have an API key, simply set the environment variable:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8f7303e1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!export OPENGRADIENT_PRIVATE_KEY=\"your-api-key\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0a7af45e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you need to set up a new API key, download the opengradient SDK and follow the instructions to initialize a new configuration."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a7777f1e",
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "shellscript"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install opengradient\n",
|
||||
"!opengradient config init"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"This toolkit lives in the `langchain-opengradient` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-opengradient"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our toolkit with the API key from before."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_opengradient import OpenGradientToolkit\n",
|
||||
"\n",
|
||||
"toolkit = OpenGradientToolkit(\n",
|
||||
" # Not required if you have already set the environment variable OPENGRADIENT_PRIVATE_KEY\n",
|
||||
" private_key=\"your-api-key\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ad986625",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Build your own tools\n",
|
||||
"The OpenGradientToolkit offers two main methods for creating custom tools:\n",
|
||||
"\n",
|
||||
"### 1. Create a tool to run ML models\n",
|
||||
"You can create tools that leverage ML models deployed on the [OpenGradient model hub](https://hub.opengradient.ai/). User-created models can be uploaded, inferenced, and shared to the model hub through the [OpenGradient SDK](https://docs.opengradient.ai/developers/sdk/model_management.html).\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f7a03746",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import opengradient as og\n",
|
||||
"from pydantic import BaseModel, Field\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Example 1: Simple tool with no input schema\n",
|
||||
"def price_data_provider():\n",
|
||||
" \"\"\"Function that provides input data to the model.\"\"\"\n",
|
||||
" return {\n",
|
||||
" \"open_high_low_close\": [\n",
|
||||
" [2535.79, 2535.79, 2505.37, 2515.36],\n",
|
||||
" [2515.37, 2516.37, 2497.27, 2506.94],\n",
|
||||
" [2506.94, 2515, 2506.35, 2508.77],\n",
|
||||
" [2508.77, 2519, 2507.55, 2518.79],\n",
|
||||
" [2518.79, 2522.1, 2513.79, 2517.92],\n",
|
||||
" [2517.92, 2521.4, 2514.65, 2518.13],\n",
|
||||
" [2518.13, 2525.4, 2517.2, 2522.6],\n",
|
||||
" [2522.59, 2528.81, 2519.49, 2526.12],\n",
|
||||
" [2526.12, 2530, 2524.11, 2529.99],\n",
|
||||
" [2529.99, 2530.66, 2525.29, 2526],\n",
|
||||
" ]\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def format_volatility(inference_result):\n",
|
||||
" \"\"\"Function that formats the model output.\"\"\"\n",
|
||||
" return format(float(inference_result.model_output[\"Y\"].item()), \".3%\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Create the tool\n",
|
||||
"volatility_tool = toolkit.create_run_model_tool(\n",
|
||||
" model_cid=\"QmRhcpDXfYCKsimTmJYrAVM4Bbvck59Zb2onj3MHv9Kw5N\",\n",
|
||||
" tool_name=\"eth_volatility\",\n",
|
||||
" model_input_provider=price_data_provider,\n",
|
||||
" model_output_formatter=format_volatility,\n",
|
||||
" tool_description=\"Generates volatility measurement for ETH/USDT trading pair\",\n",
|
||||
" inference_mode=og.InferenceMode.VANILLA,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Example 2: Tool with input schema from the agent\n",
|
||||
"class TokenInputSchema(BaseModel):\n",
|
||||
" token: str = Field(description=\"Token name (ethereum or bitcoin)\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def token_data_provider(**inputs):\n",
|
||||
" \"\"\"Dynamic function that changes behavior based on agent input.\"\"\"\n",
|
||||
" token = inputs.get(\"token\")\n",
|
||||
" if token == \"bitcoin\":\n",
|
||||
" return {\"price_series\": [100001.1, 100013.2, 100149.2, 99998.1]}\n",
|
||||
" else: # ethereum\n",
|
||||
" return {\"price_series\": [2010.1, 2012.3, 2020.1, 2019.2]}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Create the tool with schema\n",
|
||||
"token_tool = toolkit.create_run_model_tool(\n",
|
||||
" model_cid=\"QmZdSfHWGJyzBiB2K98egzu3MypPcv4R1ASypUxwZ1MFUG\",\n",
|
||||
" tool_name=\"token_volatility\",\n",
|
||||
" model_input_provider=token_data_provider,\n",
|
||||
" model_output_formatter=lambda x: format(float(x.model_output[\"std\"].item()), \".3%\"),\n",
|
||||
" tool_input_schema=TokenInputSchema,\n",
|
||||
" tool_description=\"Measures return volatility for a specified token\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Add tools to the toolkit\n",
|
||||
"toolkit.add_tool(volatility_tool)\n",
|
||||
"toolkit.add_tool(token_tool)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "45627b99",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 2. Create a tool to read workflow results\n",
|
||||
"\n",
|
||||
"Read workflows are scheduled inferences that regularly run models stored on smart-contracts with live oracle data. More information on these can be [found here](https://docs.opengradient.ai/developers/sdk/ml_workflows.html).\n",
|
||||
"\n",
|
||||
"You can create tools that read results from workflow smart contracts:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "197cccbd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Create a tool to read from a workflow\n",
|
||||
"forecast_tool = toolkit.create_read_workflow_tool(\n",
|
||||
" workflow_contract_address=\"0x58826c6dc9A608238d9d57a65bDd50EcaE27FE99\",\n",
|
||||
" tool_name=\"ETH_Price_Forecast\",\n",
|
||||
" tool_description=\"Reads latest forecast for ETH price from deployed workflow\",\n",
|
||||
" output_formatter=lambda x: f\"Price change forecast: {format(float(x.numbers['regression_output'].item()), '.2%')}\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Add the tool to the toolkit\n",
|
||||
"toolkit.add_tool(forecast_tool)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5c5f2839-4020-424e-9fc9-07777eede442",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Tools\n",
|
||||
"\n",
|
||||
"Use the built in `get_tools()` method to view a list of the available tools within the OpenGradient toolkit."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "51a60dbe-9f2e-4e04-bb62-23968f17164a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tools = toolkit.get_tools()\n",
|
||||
"\n",
|
||||
"# View tools\n",
|
||||
"for tool in tools:\n",
|
||||
" print(tool)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "dfe8aad4-8626-4330-98a9-7ea1ca5d2e0e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use within an agent\n",
|
||||
"Here's how to use your OpenGradient tools with a LangChain agent:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "310bf18e-6c9a-4072-b86e-47bc1fcca29d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"# Initialize LLM\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-4o\")\n",
|
||||
"\n",
|
||||
"# Create tools from the toolkit\n",
|
||||
"tools = toolkit.get_tools()\n",
|
||||
"\n",
|
||||
"# Create agent\n",
|
||||
"agent_executor = create_react_agent(llm, tools)\n",
|
||||
"\n",
|
||||
"# Example query for the agent\n",
|
||||
"example_query = \"What's the current volatility of ETH?\"\n",
|
||||
"\n",
|
||||
"# Execute the agent\n",
|
||||
"events = agent_executor.stream(\n",
|
||||
" {\"messages\": [(\"user\", example_query)]},\n",
|
||||
" stream_mode=\"values\",\n",
|
||||
")\n",
|
||||
"for event in events:\n",
|
||||
" event[\"messages\"][-1].pretty_print()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2bb2b716",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Here's a sample output of everything put together:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"================================ Human Message =================================\n",
|
||||
"\n",
|
||||
"What's the current volatility of ETH?\n",
|
||||
"================================== Ai Message ==================================\n",
|
||||
"Tool Calls:\n",
|
||||
" eth_volatility (chatcmpl-tool-d66ab9ee8f2c40e5a2634d90c7aeb17d)\n",
|
||||
" Call ID: chatcmpl-tool-d66ab9ee8f2c40e5a2634d90c7aeb17d\n",
|
||||
" Args:\n",
|
||||
"================================= Tool Message =================================\n",
|
||||
"Name: eth_volatility\n",
|
||||
"\n",
|
||||
"0.038%\n",
|
||||
"================================== Ai Message ==================================\n",
|
||||
"\n",
|
||||
"The current volatility of the ETH/USDT trading pair is 0.038%.\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "268bc64a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"See the [Github page](https://github.com/OpenGradient/og-langchain) for more detail."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
315
docs/docs/integrations/tools/tableau.ipynb
Normal file
315
docs/docs/integrations/tools/tableau.ipynb
Normal file
@@ -0,0 +1,315 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1f302499-eb05-4296-8716-950babc0f10e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Tableau\n",
|
||||
"\n",
|
||||
"This notebook provides a quick overview for getting started with [Tableau](https://help.tableau.com/current/api/vizql-data-service/en-us/index.html). "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4d57b913-819e-4676-9f6e-3afe0a80030e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Overview\n",
|
||||
"\n",
|
||||
"Tableau's VizQL Data Service (aka VDS) provides developers with programmatic access to their Tableau Published Data Sources, allowing them to extend their business semantics for any custom workload or application, including AI Agents. The simple_datasource_qa tool adds VDS to the Langchain framework. This notebook shows you how you can use it to build agents that answer analytical questions grounded on your enterprise semantic models. \n",
|
||||
"\n",
|
||||
"Follow the [tableau-langchain](https://github.com/Tab-SE/tableau_langchain) project for more tools coming soon!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "311bce64",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Setup\n",
|
||||
"Make sure you are running and have access to:\n",
|
||||
"1. python version 3.12.2 or higher\n",
|
||||
"2. A Tableau Cloud or Server environment with at least 1 published data source\n",
|
||||
"\n",
|
||||
"Get started by installing and/or importing the required packages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9b178e95-ffae-4f04-ad77-1fdc2ab05edf",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# %pip install langchain-openai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8605e87a-2253-4c89-992a-ecdbec955ef6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# %pip install langgraph"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c13dca76",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Requirement already satisfied: regex>=2022.1.18 in /Users/joe.constantino/.pyenv/versions/3.12.2/lib/python3.12/site-packages (from tiktoken<1,>=0.7->langchain-openai->langchain-tableau) (2024.11.6)\r\n",
|
||||
"Requirement already satisfied: httpcore==1.* in /Users/joe.constantino/.pyenv/versions/3.12.2/lib/python3.12/site-packages (from httpx>=0.25.2->langgraph-sdk<0.2.0,>=0.1.42->langgraph->langchain-tableau) (1.0.7)\r\n",
|
||||
"Requirement already satisfied: h11<0.15,>=0.13 in /Users/joe.constantino/.pyenv/versions/3.12.2/lib/python3.12/site-packages (from httpcore==1.*->httpx>=0.25.2->langgraph-sdk<0.2.0,>=0.1.42->langgraph->langchain-tableau) (0.14.0)\r\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# %pip install langchain-tableau --upgrade"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "bbaa05f4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note: you may need to restart your kernel to use updated packages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "80473fcc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"You can declare your environment variables explicitly, as shown in several cases in this doc. However, if these parameters are not provided, the simple_datasource_qa tool will attempt to automatically read them from environment variables.\n",
|
||||
"\n",
|
||||
"For the Data Source that you choose to query, make sure you've updated the VizqlDataApiAccess permission in Tableau to allow the VDS API to access that Data Source via REST. More info [here](https://help.tableau.com/current/server/en-us/permissions_capabilities.htm#data-sources\n",
|
||||
"). "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "310d21b3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# langchain package imports\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"# langchain_tableau and langgraph imports\n",
|
||||
"from langchain_tableau.tools.simple_datasource_qa import initialize_simple_datasource_qa\n",
|
||||
"from langgraph.prebuilt import create_react_agent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "596d6718-f2e1-44bb-b614-65447862661c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Authentication Variables\n",
|
||||
"You can declare your environment variables explicitly, as shown in several cases in this cookbook. However, if these parameters are not provided, the simple_datasource_qa tool will attempt to automatically read them from environment variables.\n",
|
||||
"\n",
|
||||
"For the Data Source that you choose, make sure you've updated the VizqlDataApiAccess permission in Tableau to allow the VDS API to access that Data Source via REST. More info [here](https://help.tableau.com/current/server/en-us/permissions_capabilities.htm#data-sources\n",
|
||||
"). "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "ccfb4159-34ac-4816-a8f0-795c5442c0b2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from dotenv import load_dotenv\n",
|
||||
"\n",
|
||||
"load_dotenv()\n",
|
||||
"\n",
|
||||
"tableau_server = \"https://stage-dataplane2.tableau.sfdc-shbmgi.svc.sfdcfc.net/\" # replace with your Tableau server name\n",
|
||||
"tableau_site = \"vizqldataservicestage02\" # replace with your Tableau site\n",
|
||||
"tableau_jwt_client_id = os.getenv(\n",
|
||||
" \"TABLEAU_JWT_CLIENT_ID\"\n",
|
||||
") # a JWT client ID (obtained through Tableau's admin UI)\n",
|
||||
"tableau_jwt_secret_id = os.getenv(\n",
|
||||
" \"TABLEAU_JWT_SECRET_ID\"\n",
|
||||
") # a JWT secret ID (obtained through Tableau's admin UI)\n",
|
||||
"tableau_jwt_secret = os.getenv(\n",
|
||||
" \"TABLEAU_JWT_SECRET\"\n",
|
||||
") # a JWT secret ID (obtained through Tableau's admin UI)\n",
|
||||
"tableau_api_version = \"3.21\" # the current Tableau REST API Version\n",
|
||||
"tableau_user = \"joe.constantino@salesforce.com\" # replace with the username querying the target Tableau Data Source\n",
|
||||
"\n",
|
||||
"# For this cookbook we are connecting to the Superstore dataset that comes by default with every Tableau server\n",
|
||||
"datasource_luid = (\n",
|
||||
" \"0965e61b-a072-43cf-994c-8c6cf526940d\" # the target data source for this Tool\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Add variables to control LLM models for the Agent and Tools\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"]  # set your model API key as an environment variable\n",
|
||||
"tooling_llm_model = \"gpt-4o\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "64d08107",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"The initialize_simple_datasource_qa initializes the Langgraph tool called [simple_datasource_qa](https://github.com/Tab-SE/tableau_langchain/blob/3ff9047414479cd55d797c18a78f834d57860761/pip_package/langchain_tableau/tools/simple_datasource_qa.py#L101), which can be used for analytical questions and answers on a Tableau Data Source.\n",
|
||||
"\n",
|
||||
"This initializer function:\n",
|
||||
"1. Authenticates to Tableau using Tableau's connected-app framework for JWT-based authentication. All the required variables must be defined at runtime or as environment variables.\n",
|
||||
"2. Asynchronously queries for the field metadata of the target datasource specified in the datasource_luid variable.\n",
|
||||
"3. Grounds on the metadata of the target datasource to transform natural language questions into the json-formatted query payload required to make VDS query-datasource requests.\n",
|
||||
"4. Executes a POST request to VDS.\n",
|
||||
"5. Formats and returns the results in a structured response."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "72ee3eca",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Initialize simple_datasource_qa for querying Tableau Datasources through VDS\n",
|
||||
"analyze_datasource = initialize_simple_datasource_qa(\n",
|
||||
" domain=tableau_server,\n",
|
||||
" site=tableau_site,\n",
|
||||
" jwt_client_id=tableau_jwt_client_id,\n",
|
||||
" jwt_secret_id=tableau_jwt_secret_id,\n",
|
||||
" jwt_secret=tableau_jwt_secret,\n",
|
||||
" tableau_api_version=tableau_api_version,\n",
|
||||
" tableau_user=tableau_user,\n",
|
||||
" datasource_luid=datasource_luid,\n",
|
||||
" tooling_llm_model=tooling_llm_model,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# load the List of Tools to be used by the Agent. In this case we will just load our data source Q&A tool.\n",
|
||||
"tools = [analyze_datasource]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0ac5daa0-4336-48d0-9c26-20bf2c252bad",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation - Langgraph Example\n",
|
||||
"First, we'll initialize the LLM of our choice. Then, we define an agent using a langgraph agent constructor class and invoke it with a query related to the target data source. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "06a1d3f7-79a8-452e-b37e-9070d15445b0",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/markdown": [
|
||||
"Here are the results for the states with the highest sales and profits based on the data queried:\n",
|
||||
"\n",
|
||||
"### States with the Most Sales\n",
|
||||
"1. **California**: $457,687.63\n",
|
||||
"2. **New York**: $310,876.27\n",
|
||||
"3. **Texas**: $170,188.05\n",
|
||||
"4. **Washington**: $138,641.27\n",
|
||||
"5. **Pennsylvania**: $116,511.91\n",
|
||||
"\n",
|
||||
"### States with the Most Profit\n",
|
||||
"1. **California**: $76,381.39\n",
|
||||
"2. **New York**: $74,038.55\n",
|
||||
"3. **Washington**: $33,402.65\n",
|
||||
"4. **Michigan**: $24,463.19\n",
|
||||
"5. **Virginia**: $18,597.95\n",
|
||||
"\n",
|
||||
"### Comparison\n",
|
||||
"- **California** and **New York** are the only states that appear in both lists, indicating they are the top sellers and also generate the most profit.\n",
|
||||
"- **Texas**, while having the third highest sales, does not rank in the top five for profit, showing a potential issue with profitability despite high sales.\n",
|
||||
"\n",
|
||||
"This analysis suggests that high sales do not always correlate with high profits, as seen with Texas."
|
||||
],
|
||||
"text/plain": [
|
||||
"<IPython.core.display.Markdown object>"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from IPython.display import Markdown, display\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)\n",
|
||||
"\n",
|
||||
"tableauAgent = create_react_agent(model, tools)\n",
|
||||
"\n",
|
||||
"# Run the agent\n",
|
||||
"messages = tableauAgent.invoke(\n",
|
||||
" {\n",
|
||||
" \"messages\": [\n",
|
||||
" (\n",
|
||||
" \"human\",\n",
|
||||
" \"which states sell the most? Are those the same states with the most profits?\",\n",
|
||||
" )\n",
|
||||
" ]\n",
|
||||
" }\n",
|
||||
")\n",
|
||||
"messages\n",
|
||||
"# display(Markdown(messages['messages'][4].content)) #display a nicely formatted answer for successful generations"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e6b20093",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"TODO."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "12ab3d7b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"TODO."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python (package_test_env)",
|
||||
"language": "python",
|
||||
"name": "package_test_env"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.2"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
306
docs/docs/integrations/tools/taiga.ipynb
Normal file
306
docs/docs/integrations/tools/taiga.ipynb
Normal file
@@ -0,0 +1,306 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "10238e62-3465-4973-9279-606cbb7ccf16",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Taiga\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# Taiga\n",
|
||||
"\n",
|
||||
"This notebook provides a quick overview for getting started with Taiga tooling in [langchain_taiga](https://github.com/Shikenso-Analytics/langchain-taiga/blob/main/docs/tools.ipynb). For more details on each tool and configuration, see the docstrings in your repository or relevant doc pages.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Serializable | JS support | Package latest |\n",
|
||||
"|:-----------------------------------------------------------------------------------------------------|:---------------------------------------------------------------------------| :---: |:------------------------------------------------------------------------------:|:--------------------------------------------------------------------------------------------:|\n",
|
||||
"| `create_entity_tool`, `search_entities_tool`, `get_entity_by_ref_tool`, `update_entity_by_ref_tool` , `add_comment_by_ref_tool`, `add_attachment_by_ref_tool` | [langchain-taiga](https://github.com/Shikenso-Analytics/langchain-taiga) | N/A | TBD |  |\n",
|
||||
"\n",
|
||||
"### Tool features\n",
|
||||
"\n",
|
||||
"- **`create_entity_tool`**: Creates user stories, tasks and issues in Taiga.\n",
|
||||
"- **`search_entities_tool`**: Searches for user stories, tasks and issues in Taiga.\n",
|
||||
"- **`get_entity_by_ref_tool`**: Gets a user story, task or issue by reference.\n",
|
||||
"- **`update_entity_by_ref_tool`**: Updates a user story, task or issue by reference.\n",
|
||||
"- **`add_comment_by_ref_tool`**: Adds a comment to a user story, task or issue.\n",
|
||||
"- **`add_attachment_by_ref_tool`**: Adds an attachment to a user story, task or issue.\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"The integration lives in the `langchain-taiga` package."
|
||||
],
|
||||
"id": "41616bfd02d989a6"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"id": "f85b4089",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2025-02-28T12:43:23.290414Z",
|
||||
"start_time": "2025-02-28T12:43:23.162563Z"
|
||||
}
|
||||
},
|
||||
"source": "%pip install --quiet -U langchain-taiga",
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/home/henlein/Workspace/PyCharm/langchain/.venv/bin/python: No module named pip\r\n",
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"execution_count": 3
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b15e9266",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"This integration requires you to set `TAIGA_URL`, `TAIGA_API_URL`, `TAIGA_USERNAME`, `TAIGA_PASSWORD` and `OPENAI_API_KEY` as environment variables to authenticate with Taiga.\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"export TAIGA_URL=\"https://taiga.xyz.org/\"\n",
|
||||
"export TAIGA_API_URL=\"https://taiga.xyz.org/\"\n",
|
||||
"export TAIGA_USERNAME=\"username\"\n",
|
||||
"export TAIGA_PASSWORD=\"pw\"\n",
|
||||
"export OPENAI_API_KEY=\"OPENAI_API_KEY\"\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "bc5ab717-fd27-4c59-b912-bdd099541478",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"It's also helpful (but not needed) to set up [LangSmith](https://smith.langchain.com/) for best-in-class observability:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"id": "a6c2f136-6367-4f1f-825d-ae741e1bf281",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2025-02-28T12:43:23.295879Z",
|
||||
"start_time": "2025-02-28T12:43:23.293809Z"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass()"
|
||||
],
|
||||
"outputs": [],
|
||||
"execution_count": 4
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Below is an example showing how to instantiate the Taiga tools in `langchain_taiga`. Adjust as needed for your specific usage."
|
||||
],
|
||||
"id": "d6eab61edeeb40a5"
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "code",
|
||||
"outputs": [],
|
||||
"execution_count": null,
|
||||
"source": [
|
||||
"from langchain_taiga.tools.taiga_tools import create_entity_tool\n",
|
||||
"from langchain_taiga.tools.taiga_tools import search_entities_tool\n",
|
||||
"\n",
|
||||
"create_tool = create_entity_tool\n",
|
||||
"search_tool = search_entities_tool"
|
||||
],
|
||||
"id": "8ae97a3413cd040e"
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "74147a1a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation\n",
|
||||
"\n",
|
||||
"### Direct invocation with args\n",
|
||||
"\n",
|
||||
"Below is a simple example of calling the tool with keyword arguments in a dictionary."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"id": "65310a8b-eb0c-4d9e-a618-4f4abe2414fc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"from langchain_taiga.tools.taiga_tools import (\n",
|
||||
" add_attachment_by_ref_tool,\n",
|
||||
" add_comment_by_ref_tool,\n",
|
||||
" create_entity_tool,\n",
|
||||
" get_entity_by_ref_tool,\n",
|
||||
" search_entities_tool,\n",
|
||||
" update_entity_by_ref_tool,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"response = create_entity_tool.invoke(\n",
|
||||
" {\n",
|
||||
" \"project_slug\": \"slug\",\n",
|
||||
" \"entity_type\": \"us\",\n",
|
||||
" \"subject\": \"subject\",\n",
|
||||
" \"status\": \"new\",\n",
|
||||
" \"description\": \"desc\",\n",
|
||||
" \"parent_ref\": 5,\n",
|
||||
" \"assign_to\": \"user\",\n",
|
||||
" \"due_date\": \"2022-01-01\",\n",
|
||||
" \"tags\": [\"tag1\", \"tag2\"],\n",
|
||||
" }\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"response = search_entities_tool.invoke(\n",
|
||||
" {\"project_slug\": \"slug\", \"query\": \"query\", \"entity_type\": \"task\"}\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"response = get_entity_by_ref_tool.invoke(\n",
|
||||
" {\"entity_type\": \"user_story\", \"project_id\": 1, \"ref\": \"1\"}\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"response = update_entity_by_ref_tool.invoke(\n",
|
||||
" {\"project_slug\": \"slug\", \"entity_ref\": 555, \"entity_type\": \"us\"}\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"response = add_comment_by_ref_tool.invoke(\n",
|
||||
" {\"project_slug\": \"slug\", \"entity_ref\": 3, \"entity_type\": \"us\", \"comment\": \"new\"}\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"response = add_attachment_by_ref_tool.invoke(\n",
|
||||
" {\n",
|
||||
" \"project_slug\": \"slug\",\n",
|
||||
" \"entity_ref\": 3,\n",
|
||||
" \"entity_type\": \"us\",\n",
|
||||
" \"attachment_url\": \"url\",\n",
|
||||
" \"content_type\": \"png\",\n",
|
||||
" \"description\": \"desc\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
],
|
||||
"outputs": [],
|
||||
"execution_count": null
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d6e73897",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Invocation with ToolCall\n",
|
||||
"\n",
|
||||
"If you have a model-generated `ToolCall`, pass it to `tool.invoke()` in the format shown below."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f90e33a7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# This is usually generated by a model, but we'll create a tool call directly for demo purposes.\n",
|
||||
"model_generated_tool_call = {\n",
|
||||
" \"args\": {\"project_slug\": \"slug\", \"query\": \"query\", \"entity_type\": \"task\"},\n",
|
||||
" \"id\": \"1\",\n",
|
||||
" \"name\": search_entities_tool.name,\n",
|
||||
" \"type\": \"tool_call\",\n",
|
||||
"}\n",
|
||||
"search_entities_tool.invoke(model_generated_tool_call)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"Below is a more complete example showing how you might integrate the `create_entity_tool` and `search_entities_tool` tools in a chain or agent with an LLM. This example assumes you have a function (like `create_react_agent`) that sets up a LangChain-style agent capable of calling tools when appropriate.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"# Example: Using Taiga Tools in an Agent\n",
|
||||
"\n",
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"from langchain_taiga.tools.taiga_tools import create_entity_tool, search_entities_tool\n",
|
||||
"\n",
|
||||
"# 1. Instantiate or configure your language model\n",
|
||||
"# (Replace with your actual LLM, e.g., ChatOpenAI(temperature=0))\n",
|
||||
"llm = ...\n",
|
||||
"\n",
|
||||
"# 2. Build an agent that has access to these tools\n",
|
||||
"agent_executor = create_react_agent(llm, [create_entity_tool, search_entities_tool])\n",
|
||||
"\n",
|
||||
"# 3. Formulate a user query that may invoke one or both tools\n",
|
||||
"example_query = \"Please create a new user story with the subject 'subject' in slug project: 'slug'\"\n",
|
||||
"\n",
|
||||
"# 4. Execute the agent in streaming mode (or however your code is structured)\n",
|
||||
"events = agent_executor.stream(\n",
|
||||
" {\"messages\": [(\"user\", example_query)]},\n",
|
||||
" stream_mode=\"values\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# 5. Print out the model's responses (and any tool outputs) as they arrive\n",
|
||||
"for event in events:\n",
|
||||
" event[\"messages\"][-1].pretty_print()\n",
|
||||
"```\n"
|
||||
],
|
||||
"id": "8cafefef7c8bd43e"
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"See the docstrings in:\n",
|
||||
"- [taiga_tools.py](https://github.com/Shikenso-Analytics/langchain-taiga/blob/main/langchain_taiga/tools/taiga_tools.py)\n",
|
||||
"- [toolkits.py](https://github.com/Shikenso-Analytics/langchain-taiga/blob/main/langchain_taiga/toolkits.py)\n",
|
||||
"\n",
|
||||
"for usage details, parameters, and advanced configurations."
|
||||
],
|
||||
"id": "4ac8146c"
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
375
docs/docs/integrations/tools/tavily_extract.ipynb
Normal file
375
docs/docs/integrations/tools/tavily_extract.ipynb
Normal file
File diff suppressed because one or more lines are too long
@@ -18,29 +18,42 @@
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
"| Class | Package | Serializable | [JS support](https://js.langchain.com/docs/integrations/tools/tavily_search) | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: |\n",
|
||||
"| [TavilySearchResults](https://python.langchain.com/api_reference/community/tools/langchain_community.tools.tavily_search.tool.TavilySearchResults.html) | [langchain-community](https://python.langchain.com/api_reference/community/index.html) | ❌ | ✅ |  |\n",
|
||||
"| Class | Package | Serializable | [JS support](https://js.langchain.com/docs/integrations/tools/tavily_search) | Package latest |\n",
|
||||
"|:--------------------------------------------------------------|:---------------------------------------------------------------| :---: | :---: | :---: |\n",
|
||||
"| [TavilySearch](https://github.com/tavily-ai/langchain-tavily) | [langchain-tavily](https://pypi.org/project/langchain-tavily/) | ❌ | ❌ |  |\n",
|
||||
"\n",
|
||||
"### Tool features\n",
|
||||
"| [Returns artifact](/docs/how_to/tool_artifacts/) | Native async | Return data | Pricing |\n",
|
||||
"| :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | Title, URL, content, answer | 1,000 free searches / month | \n",
|
||||
"| [Returns artifact](/docs/how_to/tool_artifacts/) | Native async | Return data | Pricing |\n",
|
||||
"| :---: | :---: |:--------------------------------------------------------:| :---: |\n",
|
||||
"| ❌ | ✅ | title, URL, content snippet, raw_content, answer, images | 1,000 free searches / month |\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"The integration lives in the `langchain-community` package. We also need to install the `tavily-python` package."
|
||||
"The integration lives in the `langchain-tavily` package."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 1,
|
||||
"id": "f85b4089",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2025-03-19T16:45:22.942913Z",
|
||||
"start_time": "2025-03-19T16:45:22.799418Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install -qU \"langchain-community>=0.2.11\" tavily-python"
|
||||
"%pip install -qU langchain-tavily"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -57,7 +70,12 @@
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "e0b178a2-8816-40ca-b57c-ccdd86dde9c9",
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2025-03-19T16:43:32.810957Z",
|
||||
"start_time": "2025-03-19T16:43:15.570Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
@@ -67,25 +85,6 @@
|
||||
" os.environ[\"TAVILY_API_KEY\"] = getpass.getpass(\"Tavily API key:\\n\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "bc5ab717-fd27-4c59-b912-bdd099541478",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"It's also helpful (but not needed) to set up [LangSmith](https://smith.langchain.com/) for best-in-class observability:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "a6c2f136-6367-4f1f-825d-ae741e1bf281",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1c97218f-f366-479d-8bf7-fe9f2f6df73f",
|
||||
@@ -93,29 +92,34 @@
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Here we show how to instantiate an instance of the Tavily search tools, with "
|
||||
"Here we show how to instantiate an instance of the Tavily search tools, with"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 3,
|
||||
"id": "8b3ddfe9-ca79-494c-a7ab-1f56d9407a64",
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2025-03-19T16:44:04.570451Z",
|
||||
"start_time": "2025-03-19T16:44:04.561713Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.tools import TavilySearchResults\n",
|
||||
"from langchain_tavily import TavilySearch\n",
|
||||
"\n",
|
||||
"tool = TavilySearchResults(\n",
|
||||
"tool = TavilySearch(\n",
|
||||
" max_results=5,\n",
|
||||
" search_depth=\"advanced\",\n",
|
||||
" include_answer=True,\n",
|
||||
" include_raw_content=True,\n",
|
||||
" include_images=True,\n",
|
||||
" # include_domains=[...],\n",
|
||||
" # exclude_domains=[...],\n",
|
||||
" # name=\"...\", # overwrite default tool name\n",
|
||||
" # description=\"...\", # overwrite default tool description\n",
|
||||
" # args_schema=..., # overwrite default args_schema: BaseModel\n",
|
||||
" topic=\"general\",\n",
|
||||
" # include_answer=False,\n",
|
||||
" # include_raw_content=False,\n",
|
||||
" # include_images=False,\n",
|
||||
" # include_image_descriptions=False,\n",
|
||||
" # search_depth=\"basic\",\n",
|
||||
" # time_range=\"day\",\n",
|
||||
" # include_domains=None,\n",
|
||||
" # exclude_domains=None\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@@ -128,31 +132,57 @@
|
||||
"\n",
|
||||
"### [Invoke directly with args](/docs/concepts/tools)\n",
|
||||
"\n",
|
||||
"The `TavilySearchResults` tool takes a single \"query\" argument, which should be a natural language query:"
|
||||
"The Tavily search tool accepts the following arguments during invocation:\n",
|
||||
"- `query` (required): A natural language search query\n",
|
||||
"- The following arguments can also be set during invokation : `include_images`, `search_depth` , `time_range`, `include_domains`, `exclude_domains`, `include_images`\n",
|
||||
"- For reliability and performance reasons, certain parameters that affect response size cannot be modified during invocation: `include_answer` and `include_raw_content`. These limitations prevent unexpected context window issues and ensure consistent results.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"NOTE: The optional arguments are available for agents to dynamically set, if you set a argument during instantiation and then invoke the tool with a different value, the tool will use the value you passed during invokation."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 4,
|
||||
"id": "65310a8b-eb0c-4d9e-a618-4f4abe2414fc",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'url': 'https://www.theguardian.com/sport/live/2023/jul/16/wimbledon-mens-singles-final-2023-carlos-alcaraz-v-novak-djokovic-live?page=with:block-64b3ff568f08df28470056bf',\n",
|
||||
" 'content': 'Carlos Alcaraz recovered from a set down to topple Djokovic 1-6, 7-6(6), 6-1, 3-6, 6-4 and win his first Wimbledon title in a battle for the ages'},\n",
|
||||
" {'url': 'https://www.nytimes.com/athletic/live-blogs/wimbledon-2024-live-updates-alcaraz-djokovic-mens-final-result/kJJdTKhOgkZo/',\n",
|
||||
" 'content': \"It was Djokovic's first straight-sets defeat at Wimbledon since the 2013 final, when he lost to Andy Murray. Below, The Athletic 's writers, Charlie Eccleshare and Matt Futterman, analyze the ...\"},\n",
|
||||
" {'url': 'https://www.foxsports.com.au/tennis/wimbledon/fk-you-stars-explosion-stuns-wimbledon-as-massive-final-locked-in/news-story/41cf7d28a12845cdab6be4150a22a170',\n",
|
||||
" 'content': 'The last time Djokovic and Wimbledon met was at the French Open in June when the Serb claimed victory in a third round tie which ended at 3:07 in the morning. On Friday, however, Djokovic was ...'},\n",
|
||||
" {'url': 'https://www.cnn.com/2024/07/09/sport/novak-djokovic-wimbledon-crowd-quarterfinals-spt-intl/index.html',\n",
|
||||
" 'content': 'Novak Djokovic produced another impressive performance at Wimbledon on Monday to cruise into the quarterfinals, but the 24-time grand slam champion was far from happy after his win.'},\n",
|
||||
" {'url': 'https://www.cnn.com/2024/07/05/sport/andy-murray-wimbledon-farewell-ceremony-spt-intl/index.html',\n",
|
||||
" 'content': \"It was an emotional night for three-time grand slam champion Andy Murray on Thursday, as the 37-year-old's Wimbledon farewell began with doubles defeat.. Murray will retire from the sport this ...\"}]"
|
||||
"{'query': 'What happened at the last wimbledon',\n",
|
||||
" 'follow_up_questions': None,\n",
|
||||
" 'answer': None,\n",
|
||||
" 'images': [],\n",
|
||||
" 'results': [{'title': \"Andy Murray pulls out of the men's singles draw at his last Wimbledon\",\n",
|
||||
" 'url': 'https://www.nbcnews.com/news/sports/andy-murray-wimbledon-tennis-singles-draw-rcna159912',\n",
|
||||
" 'content': \"NBC News Now LONDON — Andy Murray, one of the last decade's most successful male tennis players, has pulled out of the singles tournament at what is almost certain to be his last Wimbledon, his team confirmed Tuesday. Murray, 37, who has won the Wimbledon singles title twice and the U.S Open once, has been battling to be fit to play at the All England Club for weeks. “Unfortunately, despite working incredibly hard on his recovery since his operation just over a week ago, Andy has taken the very difficult decision not to play the singles this year,” his team said in a statement reported by Sky News. The news caps a glittering career on the ATP singles tour, which placed Murray at No. 1 in the world for 41 weeks.\",\n",
|
||||
" 'score': 0.67527276,\n",
|
||||
" 'raw_content': None},\n",
|
||||
" {'title': 'Andy Murray brought to tears by emotional ceremony as Wimbledon ...',\n",
|
||||
" 'url': 'https://edition.cnn.com/2024/07/05/sport/andy-murray-wimbledon-farewell-ceremony-spt-intl/index.html',\n",
|
||||
" 'content': 'Andy Murray brought to tears by emotional ceremony as Wimbledon farewell begins with doubles defeat | CNN Football Tennis Golf Motorsport US Sports Olympics Climbing Esports Hockey CNN10 About CNN Andy Murray became emotional when speaking on court following his Wimbledon defeat on Thursday. It was an emotional night for three-time grand slam champion Andy Murray on Thursday, as the 37-year-old’s Wimbledon farewell began with doubles defeat. Following a doubles defeat alongside his brother Jamie on Thursday, Murray was moved to tears after a short ceremony on Centre Court in which a montage of his career played out on big screens. Murray watches on as a video montage of his career highlights plays on the big screens at Wimbledon. CNN10 About CNN',\n",
|
||||
" 'score': 0.43482184,\n",
|
||||
" 'raw_content': None},\n",
|
||||
" {'title': 'Wimbledon - Latest News, Headlines and Entertainment from the BBC',\n",
|
||||
" 'url': 'https://www.bbc.co.uk/news/topics/c1kr68g26j9t',\n",
|
||||
" 'content': \"Wimbledon - Latest News, Headlines and Entertainment from the BBC BBC Homepage Search BBC Close menu BBC News BBC Verify World News TV Weather for Wimbledon London London Disabled people share experience of accessible homes London Man's pop-up urinal death may never be explained, family fears London London London London London London London Met PC jailed for assaulting man in hospital bed London London London Man jumped to his death in police station - inquest London Central London YMCA closes after failed injunction Kerr denies 'whiteness as insult' against police Man denies being getaway driver in £1m watch raid About the BBC Contact the BBC BBC emails for you The BBC is not responsible for the content of external sites.\",\n",
|
||||
" 'score': 0.3916624,\n",
|
||||
" 'raw_content': None},\n",
|
||||
" {'title': 'Wimbledon - latest news, breaking stories and comment - The Independent',\n",
|
||||
" 'url': 'https://www.independent.co.uk/topic/wimbledon',\n",
|
||||
" 'content': \"Independent Australian Open champion Jannik Sinner's style draws comparisons to Novak Djokovic Patten wins second grand slam doubles title after Australian Open epic Australian Open: Madison Keys can win her first Slam title and stop Aryna Sabalenka's threepeat Novak Djokovic hits back to beat Carlos Alcaraz in Australian Open thriller Australian Open 2025: Carlos Alcaraz and Jannik Sinner have a real rivalry atop men's tennis Australian Open 2025: Carlos Alcaraz and Jannik Sinner have a real rivalry atop men's tennis Australian Open 2025: Cases involving Jannik Sinner and Iga Swiatek make doping a top topic Australian Open 2025: There really isn't much time off in the offseason for tennis players Jd Sports Discount Code\",\n",
|
||||
" 'score': 0.3539422,\n",
|
||||
" 'raw_content': None},\n",
|
||||
" {'title': 'Novak Djokovic loses to Carlos Alcaraz Wimbledon final',\n",
|
||||
" 'url': 'https://www.wimbledon.com/en_GB/news/articles/2023-07-16/alcaraz_ends_the_djokovic_run.html',\n",
|
||||
" 'content': 'Password* By joining myWimbledon you are confirming you are happy to receive news and information from The All England Lawn Tennis Club regarding The Club, The Championships and The Grounds via email By joining myWimbledon you are confirming you are happy to receive news and information from The All England Lawn Tennis Club regarding The Club, The Championships and The Grounds via email Please enter your email address to update your password: We have sent details on how to update your password to the email address you provided. A verification email with a link to verify your account has been sent to you. Please enter the code sent to your email address below and click SUBMIT to complete the verification.',\n",
|
||||
" 'score': 0.23453853,\n",
|
||||
" 'raw_content': None}],\n",
|
||||
" 'response_time': 1.43}"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -181,7 +211,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[{\"url\": \"https://www.radiotimes.com/tv/sport/football/euro-2024-location/\", \"content\": \"Euro 2024 host cities. Germany have 10 host cities for Euro 2024, topped by the country's capital Berlin. Berlin. Cologne. Dortmund. Dusseldorf. Frankfurt. Gelsenkirchen. Hamburg.\"}, {\"url\": \"https://www.sportingnews.com/ca/soccer/news/list-euros-host-nations-uefa-european-championship-countries/85f8069d69c9f4\n"
|
||||
"{\"query\": \"euro 2024 host nation\", \"follow_up_questions\": null, \"answer\": null, \"images\": [], \"results\": [{\"title\": \"UEFA Euro 2024 - Wikipedia\", \"url\": \"https://en.wikipedia.org/wiki/UEFA_Euro_2024\", \"content\": \"Tournament details Host country Germany Dates 14 June – 14 July Teams 24 Venue(s) 10 (in 10 host cities) Final positions Champions Spain (4th title) Runners-up England Tournament statisti\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -199,69 +229,16 @@
|
||||
"print(tool_msg.content[:400])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"id": "d8e27be0-1098-4688-8d8c-6e257aae8d56",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'query': str,\n",
|
||||
" 'follow_up_questions': NoneType,\n",
|
||||
" 'answer': str,\n",
|
||||
" 'images': list,\n",
|
||||
" 'results': list,\n",
|
||||
" 'response_time': float}"
|
||||
]
|
||||
},
|
||||
"execution_count": 20,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# The artifact is a dict with richer, raw results\n",
|
||||
"{k: type(v) for k, v in tool_msg.artifact.items()}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"id": "237ca620-ac31-449a-826b-b4f2e265b194",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{\n",
|
||||
" \"query\": \"euro 2024 host nation\",\n",
|
||||
" \"follow_up_questions\": \"None\",\n",
|
||||
" \"answer\": \"Germany will be the host nation for Euro 2024, with the tournament scheduled to take place from June 14 to July 14. The matches will be held in 10 different cities across Germany, including Berlin, Co\",\n",
|
||||
" \"images\": \"['https://i.ytimg.com/vi/3hsX0vLatNw/maxresdefault.jpg', 'https://img.planetafobal.com/2021/10/sedes-uefa-euro-2024-alemania-fg.jpg', 'https://editorial.uefa.com/resources/0274-14fe4fafd0d4-413fc8a7b7\",\n",
|
||||
" \"results\": \"[{'title': 'Where is Euro 2024? Country, host cities and venues', 'url': 'https://www.radiotimes.com/tv/sport/football/euro-2024-location/', 'content': \\\"Euro 2024 host cities. Germany have 10 host cit\",\n",
|
||||
" \"response_time\": \"3.97\"\n",
|
||||
"}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"\n",
|
||||
"# Abbreviate the results for demo purposes\n",
|
||||
"print(json.dumps({k: str(v)[:200] for k, v in tool_msg.artifact.items()}, indent=2))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "659f9fbd-6fcf-445f-aa8c-72d8e60154bd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"## Use within an agent\n",
|
||||
"\n",
|
||||
"We can use our tool in a chain by first binding it to a [tool-calling model](/docs/how_to/tool_calling/) and then calling it:\n",
|
||||
"We can use our tools directly with an agent executor by binding the tool to the agent. This gives the agent the ability to dynamically set the available arguments to the Tavily search tool.\n",
|
||||
"\n",
|
||||
"In the below example when we ask the agent to find \"What is the most popular sport in the world? include only wikipedia sources\" the agent will dynamically set the argments and invoke Tavily search tool : Invoking `tavily_search` with `{'query': 'most popular sport in the world', 'include_domains': ['wikipedia.org']`\n",
|
||||
"\n",
|
||||
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
|
||||
"\n",
|
||||
@@ -270,7 +247,18 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 10,
|
||||
"id": "b14d41f2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"if not os.environ.get(\"OPENAI_API_KEY\"):\n",
|
||||
" os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OPENAI_API_KEY:\\n\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "af3123ad-7a02-40e5-b58e-7d56e23e5830",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -286,17 +274,33 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"execution_count": 15,
|
||||
"id": "fdbf35b5-3aaf-4947-9ec6-48c21533fb95",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m\n",
|
||||
"Invoking: `tavily_search` with `{'query': 'most popular sport in the world', 'include_domains': ['wikipedia.org']}`\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3m{'query': 'most popular sport in the world', 'follow_up_questions': None, 'answer': None, 'images': [], 'results': [{'title': 'Sport - Wikipedia', 'url': 'https://en.wikipedia.org/wiki/Sport', 'content': 'The world\\'s most accessible and practised sport is running, while association football is the most popular spectator sport. [7] Meaning and usage. Etymology. The word \"sport\" comes from the Old French desport meaning \"leisure\", with the oldest definition in English from around 1300 being \"anything humans find amusing or entertaining\". [8]', 'score': 0.69746524, 'raw_content': None}, {'title': 'List of sports - Wikipedia', 'url': 'https://en.wikipedia.org/wiki/List_of_sports', 'content': \"According to the World Sports Encyclopaedia (2003), there are 8,000 known indigenous sports and sporting games. [1] Acrobatic arts Cheerleading. Breakdancing ... If just 2% of our most loyal readers gave $2.75 today, we'd hit our goal in a few hours. Most readers don't donate, so if Wikipedia has given you $2.75 worth of knowledge, please give\", 'score': 0.59344494, 'raw_content': None}, {'title': 'List of association football attendance records - Wikipedia', 'url': 'https://en.wikipedia.org/wiki/List_of_association_football_attendance_records', 'content': 'Association football, more commonly known as \"football\" or \"soccer\" is the most popular sport at 3.5 billion fans. [1] [2] ... 
Uruguay v Brazil in the 1950 FIFA World Cup was officially spectated by 173,850 people but also determines there may have been closer to 200,000.', 'score': 0.5820878, 'raw_content': None}, {'title': 'Sports in the United States - Wikipedia', 'url': 'https://en.wikipedia.org/wiki/Sports_in_the_United_States', 'content': \"Sports in the United States are an important part of the nation's culture.Historically, the most popular sport has been baseball.However, in more recent decades, American football has been the most popular spectator sport based on broadcast viewership audience. Basketball has grown into the mainstream American sports scene since the 1980s, with ice hockey and soccer doing the same around the\", 'score': 0.5565207, 'raw_content': None}, {'title': 'Western sports - Wikipedia', 'url': 'https://en.wikipedia.org/wiki/Western_sports', 'content': 'A depiction of the FIFA World Cup, the most popular sporting event in the world.. Western sports are sports that are strongly associated with the West. [a] Many modern sports were invented in or standardized by Western countries; [1] in particular, many major sports were invented in the United Kingdom after the Industrial Revolution, [2] [3] and later, America invented some major sports such', 'score': 0.52700067, 'raw_content': None}], 'response_time': 1.55}\u001b[0m\u001b[32;1m\u001b[1;3mThe most popular sport in the world is association football, commonly known as \"football\" or \"soccer,\" with approximately 3.5 billion fans. You can find more information on this topic on [Wikipedia](https://en.wikipedia.org/wiki/List_of_association_football_attendance_records).\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"The last women's singles champion at Wimbledon was Markéta Vondroušová, who won the title in 2023.\", response_metadata={'token_usage': {'completion_tokens': 26, 'prompt_tokens': 802, 'total_tokens': 828}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_4e2b2da518', 'finish_reason': 'stop', 'logprobs': None}, id='run-2bfeec6e-8f04-477e-bf51-9500f18bd514-0', usage_metadata={'input_tokens': 802, 'output_tokens': 26, 'total_tokens': 828})"
|
||||
"'The most popular sport in the world is association football, commonly known as \"football\" or \"soccer,\" with approximately 3.5 billion fans. You can find more information on this topic on [Wikipedia](https://en.wikipedia.org/wiki/List_of_association_football_attendance_records).'"
|
||||
]
|
||||
},
|
||||
"execution_count": 23,
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -304,41 +308,49 @@
|
||||
"source": [
|
||||
"import datetime\n",
|
||||
"\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables import RunnableConfig, chain\n",
|
||||
"from langchain.agents import AgentExecutor, create_openai_tools_agent\n",
|
||||
"from langchain.schema import HumanMessage\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
|
||||
"from langchain_tavily import TavilySearch\n",
|
||||
"\n",
|
||||
"# Initialize LLM\n",
|
||||
"llm = init_chat_model(model=\"gpt-4o\", model_provider=\"openai\", temperature=0)\n",
|
||||
"\n",
|
||||
"# Initialize Tavily Search Tool\n",
|
||||
"tavily_search_tool = TavilySearch(\n",
|
||||
" max_results=5,\n",
|
||||
" topic=\"general\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Set up Prompt with 'agent_scratchpad'\n",
|
||||
"today = datetime.datetime.today().strftime(\"%D\")\n",
|
||||
"prompt = ChatPromptTemplate(\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", f\"You are a helpful assistant. The date today is {today}.\"),\n",
|
||||
" (\"human\", \"{user_input}\"),\n",
|
||||
" (\"placeholder\", \"{messages}\"),\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" f\"\"\"You are a helpful reaserch assistant, you will be given a query and you will need to \n",
|
||||
" search the web for the most relevant information. The date today is {today}.\"\"\",\n",
|
||||
" ),\n",
|
||||
" MessagesPlaceholder(variable_name=\"messages\"),\n",
|
||||
" MessagesPlaceholder(\n",
|
||||
" variable_name=\"agent_scratchpad\"\n",
|
||||
" ), # Required for tool calls\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# specifying tool_choice will force the model to call this tool.\n",
|
||||
"llm_with_tools = llm.bind_tools([tool])\n",
|
||||
"# Create an agent that can use tools\n",
|
||||
"agent = create_openai_tools_agent(llm=llm, tools=[tavily_search_tool], prompt=prompt)\n",
|
||||
"\n",
|
||||
"llm_chain = prompt | llm_with_tools\n",
|
||||
"# Create an Agent Executor to handle tool execution\n",
|
||||
"agent_executor = AgentExecutor(agent=agent, tools=[tavily_search_tool], verbose=True)\n",
|
||||
"\n",
|
||||
"user_input = (\n",
|
||||
" \"What is the most popular sport in the world? include only wikipedia sources\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"@chain\n",
|
||||
"def tool_chain(user_input: str, config: RunnableConfig):\n",
|
||||
" input_ = {\"user_input\": user_input}\n",
|
||||
" ai_msg = llm_chain.invoke(input_, config=config)\n",
|
||||
" tool_msgs = tool.batch(ai_msg.tool_calls, config=config)\n",
|
||||
" return llm_chain.invoke({**input_, \"messages\": [ai_msg, *tool_msgs]}, config=config)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"tool_chain.invoke(\"who won the last womens singles wimbledon\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "fb115693-e89e-40f2-a460-0d0d39a17963",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Here's the [LangSmith trace](https://smith.langchain.com/public/b43232c1-b243-4a7f-afeb-5fba8c84ba56/r) for this run."
|
||||
"# Construct input properly as a dictionary\n",
|
||||
"response = agent_executor.invoke({\"messages\": [HumanMessage(content=user_input)]})\n",
|
||||
"response[\"output\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -348,15 +360,15 @@
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all TavilySearchResults features and configurations head to the API reference: https://python.langchain.com/api_reference/community/tools/langchain_community.tools.tavily_search.tool.TavilySearchResults.html"
|
||||
"For detailed documentation of all Tavily Search API features and configurations head to the API reference: https://docs.tavily.com/documentation/api-reference/endpoint/search"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv-311",
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "poetry-venv-311"
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
|
||||
439
docs/docs/integrations/tools/valthera.ipynb
Normal file
439
docs/docs/integrations/tools/valthera.ipynb
Normal file
@@ -0,0 +1,439 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Valthera\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Valthera\n",
|
||||
"\n",
|
||||
"Enable AI agents to engage users when they're most likely to respond.\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"Valthera is an open-source framework that enables LLM Agents to engage users in a more meaningful way. It is built on BJ Fogg's Behavior Model (B=MAT) and leverages data from multiple sources (such as HubSpot, PostHog, and Snowflake) to assess a user's **motivation** and **ability** before triggering an action.\n",
|
||||
"\n",
|
||||
"In this guide, you'll learn:\n",
|
||||
"\n",
|
||||
"- **Core Concepts:** Overview of the components (Data Aggregator, Scorer, Reasoning Engine, and Trigger Generator).\n",
|
||||
"- **System Architecture:** How data flows through the system and how decisions are made.\n",
|
||||
"- **Customization:** How to extend connectors, scoring metrics, and decision rules to fit your needs.\n",
|
||||
"\n",
|
||||
"Let's dive in!\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"This section covers installation of dependencies and setting up custom data connectors for Valthera."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "shellscript"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pip install openai langchain langchain_openai valthera langchain_valthera langgraph"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import Any, Dict, List\n",
|
||||
"\n",
|
||||
"from valthera.connectors.base import BaseConnector\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class MockHubSpotConnector(BaseConnector):\n",
|
||||
" \"\"\"\n",
|
||||
" Simulates data retrieval from HubSpot. Provides information such as lead score,\n",
|
||||
" lifecycle stage, and marketing metrics.\n",
|
||||
" \"\"\"\n",
|
||||
"\n",
|
||||
" def get_user_data(self, user_id: str) -> Dict[str, Any]:\n",
|
||||
" \"\"\"\n",
|
||||
" Retrieve mock HubSpot data for a given user.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" user_id: The unique identifier for the user\n",
|
||||
"\n",
|
||||
" Returns:\n",
|
||||
" A dictionary containing HubSpot user data\n",
|
||||
" \"\"\"\n",
|
||||
" return {\n",
|
||||
" \"hubspot_contact_id\": \"999-ZZZ\",\n",
|
||||
" \"lifecycle_stage\": \"opportunity\",\n",
|
||||
" \"lead_status\": \"engaged\",\n",
|
||||
" \"hubspot_lead_score\": 100,\n",
|
||||
" \"company_name\": \"MaxMotivation Corp.\",\n",
|
||||
" \"last_contacted_date\": \"2023-09-20\",\n",
|
||||
" \"hubspot_marketing_emails_opened\": 20,\n",
|
||||
" \"marketing_emails_clicked\": 10,\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class MockPostHogConnector(BaseConnector):\n",
|
||||
" \"\"\"\n",
|
||||
" Simulates data retrieval from PostHog. Provides session data and engagement events.\n",
|
||||
" \"\"\"\n",
|
||||
"\n",
|
||||
" def get_user_data(self, user_id: str) -> Dict[str, Any]:\n",
|
||||
" \"\"\"\n",
|
||||
" Retrieve mock PostHog data for a given user.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" user_id: The unique identifier for the user\n",
|
||||
"\n",
|
||||
" Returns:\n",
|
||||
" A dictionary containing PostHog user data\n",
|
||||
" \"\"\"\n",
|
||||
" return {\n",
|
||||
" \"distinct_ids\": [user_id, f\"email_{user_id}\"],\n",
|
||||
" \"last_event_timestamp\": \"2023-09-20T12:34:56Z\",\n",
|
||||
" \"feature_flags\": [\"beta_dashboard\", \"early_access\"],\n",
|
||||
" \"posthog_session_count\": 30,\n",
|
||||
" \"avg_session_duration_sec\": 400,\n",
|
||||
" \"recent_event_types\": [\"pageview\", \"button_click\", \"premium_feature_used\"],\n",
|
||||
" \"posthog_events_count_past_30days\": 80,\n",
|
||||
" \"posthog_onboarding_steps_completed\": 5,\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class MockSnowflakeConnector(BaseConnector):\n",
|
||||
" \"\"\"\n",
|
||||
" Simulates retrieval of additional user profile data from Snowflake.\n",
|
||||
" \"\"\"\n",
|
||||
"\n",
|
||||
" def get_user_data(self, user_id: str) -> Dict[str, Any]:\n",
|
||||
" \"\"\"\n",
|
||||
" Retrieve mock Snowflake data for a given user.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" user_id: The unique identifier for the user\n",
|
||||
"\n",
|
||||
" Returns:\n",
|
||||
" A dictionary containing Snowflake user data\n",
|
||||
" \"\"\"\n",
|
||||
" return {\n",
|
||||
" \"user_id\": user_id,\n",
|
||||
" \"email\": f\"{user_id}@example.com\",\n",
|
||||
" \"subscription_status\": \"paid\",\n",
|
||||
" \"plan_tier\": \"premium\",\n",
|
||||
" \"account_creation_date\": \"2023-01-01\",\n",
|
||||
" \"preferred_language\": \"en\",\n",
|
||||
" \"last_login_datetime\": \"2023-09-20T12:00:00Z\",\n",
|
||||
" \"behavior_complexity\": 3,\n",
|
||||
" }"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"In this section, we instantiate the core components. First, we create a Data Aggregator to combine data from the custom connectors. Then, we configure the scoring metrics for motivation and ability."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from valthera.aggregator import DataAggregator\n",
|
||||
"\n",
|
||||
"# Constants for configuration\n",
|
||||
"LEAD_SCORE_MAX = 100\n",
|
||||
"EVENTS_COUNT_MAX = 50\n",
|
||||
"EMAILS_OPENED_FACTOR = 10.0\n",
|
||||
"SESSION_COUNT_FACTOR_1 = 5.0\n",
|
||||
"ONBOARDING_STEPS_FACTOR = 5.0\n",
|
||||
"SESSION_COUNT_FACTOR_2 = 10.0\n",
|
||||
"BEHAVIOR_COMPLEXITY_MAX = 5.0\n",
|
||||
"\n",
|
||||
"# Initialize data aggregator\n",
|
||||
"data_aggregator = DataAggregator(\n",
|
||||
" connectors={\n",
|
||||
" \"hubspot\": MockHubSpotConnector(),\n",
|
||||
" \"posthog\": MockPostHogConnector(),\n",
|
||||
" \"snowflake\": MockSnowflakeConnector(),\n",
|
||||
" }\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# You can now fetch unified user data by calling data_aggregator.get_user_context(user_id)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import Callable, Union\n",
|
||||
"\n",
|
||||
"from valthera.scorer import ValtheraScorer\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Define transform functions with proper type annotations\n",
|
||||
"def transform_lead_score(x: Union[int, float]) -> float:\n",
|
||||
" \"\"\"Transform lead score to a value between 0 and 1.\"\"\"\n",
|
||||
" return min(x, LEAD_SCORE_MAX) / LEAD_SCORE_MAX\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def transform_events_count(x: Union[int, float]) -> float:\n",
|
||||
" \"\"\"Transform events count to a value between 0 and 1.\"\"\"\n",
|
||||
" return min(x, EVENTS_COUNT_MAX) / EVENTS_COUNT_MAX\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def transform_emails_opened(x: Union[int, float]) -> float:\n",
|
||||
" \"\"\"Transform emails opened to a value between 0 and 1.\"\"\"\n",
|
||||
" return min(x / EMAILS_OPENED_FACTOR, 1.0)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def transform_session_count_1(x: Union[int, float]) -> float:\n",
|
||||
" \"\"\"Transform session count for motivation to a value between 0 and 1.\"\"\"\n",
|
||||
" return min(x / SESSION_COUNT_FACTOR_1, 1.0)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def transform_onboarding_steps(x: Union[int, float]) -> float:\n",
|
||||
" \"\"\"Transform onboarding steps to a value between 0 and 1.\"\"\"\n",
|
||||
" return min(x / ONBOARDING_STEPS_FACTOR, 1.0)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def transform_session_count_2(x: Union[int, float]) -> float:\n",
|
||||
" \"\"\"Transform session count for ability to a value between 0 and 1.\"\"\"\n",
|
||||
" return min(x / SESSION_COUNT_FACTOR_2, 1.0)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def transform_behavior_complexity(x: Union[int, float]) -> float:\n",
|
||||
" \"\"\"Transform behavior complexity to a value between 0 and 1.\"\"\"\n",
|
||||
" return 1 - (min(x, BEHAVIOR_COMPLEXITY_MAX) / BEHAVIOR_COMPLEXITY_MAX)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Scoring configuration for user motivation\n",
|
||||
"motivation_config = [\n",
|
||||
" {\"key\": \"hubspot_lead_score\", \"weight\": 0.30, \"transform\": transform_lead_score},\n",
|
||||
" {\n",
|
||||
" \"key\": \"posthog_events_count_past_30days\",\n",
|
||||
" \"weight\": 0.30,\n",
|
||||
" \"transform\": transform_events_count,\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"key\": \"hubspot_marketing_emails_opened\",\n",
|
||||
" \"weight\": 0.20,\n",
|
||||
" \"transform\": transform_emails_opened,\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"key\": \"posthog_session_count\",\n",
|
||||
" \"weight\": 0.20,\n",
|
||||
" \"transform\": transform_session_count_1,\n",
|
||||
" },\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"# Scoring configuration for user ability\n",
|
||||
"ability_config = [\n",
|
||||
" {\n",
|
||||
" \"key\": \"posthog_onboarding_steps_completed\",\n",
|
||||
" \"weight\": 0.30,\n",
|
||||
" \"transform\": transform_onboarding_steps,\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"key\": \"posthog_session_count\",\n",
|
||||
" \"weight\": 0.30,\n",
|
||||
" \"transform\": transform_session_count_2,\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"key\": \"behavior_complexity\",\n",
|
||||
" \"weight\": 0.40,\n",
|
||||
" \"transform\": transform_behavior_complexity,\n",
|
||||
" },\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"# Instantiate the scorer\n",
|
||||
"scorer = ValtheraScorer(motivation_config, ability_config)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation\n",
|
||||
"\n",
|
||||
"Next, we set up the Reasoning Engine and Trigger Generator, then bring all components together by instantiating the Valthera Tool. Finally, we execute the agent workflow to process an input message."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"from valthera.reasoning_engine import ReasoningEngine\n",
|
||||
"\n",
|
||||
"# Define threshold as constant\n",
|
||||
"SCORE_THRESHOLD = 0.75\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Function to safely get API key\n",
|
||||
"def get_openai_api_key() -> str:\n",
|
||||
" \"\"\"Get OpenAI API key with error handling.\"\"\"\n",
|
||||
" api_key = os.environ.get(\"OPENAI_API_KEY\")\n",
|
||||
" if not api_key:\n",
|
||||
" raise ValueError(\"OPENAI_API_KEY not found in environment variables\")\n",
|
||||
" return api_key\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Decision rules using constant\n",
|
||||
"decision_rules = [\n",
|
||||
" {\n",
|
||||
" \"condition\": f\"motivation >= {SCORE_THRESHOLD} and ability >= {SCORE_THRESHOLD}\",\n",
|
||||
" \"action\": \"trigger\",\n",
|
||||
" \"description\": \"Both scores are high enough.\",\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"condition\": f\"motivation < {SCORE_THRESHOLD}\",\n",
|
||||
" \"action\": \"improve_motivation\",\n",
|
||||
" \"description\": \"User motivation is low.\",\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"condition\": f\"ability < {SCORE_THRESHOLD}\",\n",
|
||||
" \"action\": \"improve_ability\",\n",
|
||||
" \"description\": \"User ability is low.\",\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"condition\": \"otherwise\",\n",
|
||||
" \"action\": \"defer\",\n",
|
||||
" \"description\": \"No action needed at this time.\",\n",
|
||||
" },\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"try:\n",
|
||||
" api_key = get_openai_api_key()\n",
|
||||
" reasoning_engine = ReasoningEngine(\n",
|
||||
" llm=ChatOpenAI(\n",
|
||||
" model_name=\"gpt-4-turbo\", temperature=0.0, openai_api_key=api_key\n",
|
||||
" ),\n",
|
||||
" decision_rules=decision_rules,\n",
|
||||
" )\n",
|
||||
"except ValueError as e:\n",
|
||||
" print(f\"Error initializing reasoning engine: {e}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from valthera.trigger_generator import TriggerGenerator\n",
|
||||
"\n",
|
||||
"try:\n",
|
||||
" api_key = get_openai_api_key() # Reuse the function for consistency\n",
|
||||
" trigger_generator = TriggerGenerator(\n",
|
||||
" llm=ChatOpenAI(\n",
|
||||
" model_name=\"gpt-4-turbo\", temperature=0.7, openai_api_key=api_key\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
"except ValueError as e:\n",
|
||||
" print(f\"Error initializing trigger generator: {e}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_valthera.tools import ValtheraTool\n",
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"try:\n",
|
||||
" api_key = get_openai_api_key()\n",
|
||||
"\n",
|
||||
" # Initialize Valthera tool\n",
|
||||
" valthera_tool = ValtheraTool(\n",
|
||||
" data_aggregator=data_aggregator,\n",
|
||||
" motivation_config=motivation_config,\n",
|
||||
" ability_config=ability_config,\n",
|
||||
" reasoning_engine=reasoning_engine,\n",
|
||||
" trigger_generator=trigger_generator,\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" # Create agent with LLM\n",
|
||||
" llm = ChatOpenAI(model_name=\"gpt-4-turbo\", temperature=0.0, openai_api_key=api_key)\n",
|
||||
" tools = [valthera_tool]\n",
|
||||
" graph = create_react_agent(llm, tools=tools)\n",
|
||||
"\n",
|
||||
" # Define input message for testing\n",
|
||||
" inputs = {\n",
|
||||
" \"messages\": [(\"user\", \"Evaluate behavior for user_12345: Finish Onboarding\")]\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" # Process the input and display responses\n",
|
||||
" print(\"Running Valthera agent workflow...\")\n",
|
||||
" for response in graph.stream(inputs, stream_mode=\"values\"):\n",
|
||||
" print(response)\n",
|
||||
"\n",
|
||||
"except Exception as e:\n",
|
||||
" print(f\"Error running Valthera workflow: {e}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"This integration does not currently support chaining operations. Future releases may include chaining support."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"Below is an overview of the key APIs provided by the Valthera integration:\n",
|
||||
"\n",
|
||||
"- **Data Aggregator:** Use `data_aggregator.get_user_context(user_id)` to fetch aggregated user data.\n",
|
||||
"- **Scorer:** The `ValtheraScorer` computes motivation and ability scores based on the provided configurations.\n",
|
||||
"- **Reasoning Engine:** The `ReasoningEngine` evaluates decision rules to determine the appropriate action (trigger, improve motivation, improve ability, or defer).\n",
|
||||
"- **Trigger Generator:** Generates personalized trigger messages using the LLM.\n",
|
||||
"- **Valthera Tool:** Integrates all the components to process inputs and execute the agent workflow.\n",
|
||||
"\n",
|
||||
"For detailed usage, refer to the inline documentation in the source code."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
},
|
||||
"title": "Valthera Developer Guide"
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -505,7 +505,7 @@
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all `AstraDBVectorStore` features and configurations head to the API reference:https://python.langchain.com/api_reference/astradb/vectorstores/langchain_astradb.vectorstores.AstraDBVectorStore.html"
|
||||
"For detailed documentation of all `AstraDBVectorStore` features and configurations head to the API reference: https://python.langchain.com/api_reference/astradb/vectorstores/langchain_astradb.vectorstores.AstraDBVectorStore.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
@@ -36,7 +36,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install -qU langchain_milvus"
|
||||
"pip install -qU langchain_milvus"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -115,13 +115,13 @@
|
||||
"id": "df34e8f4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Milvus Standalone\n",
|
||||
"### Milvus Server\n",
|
||||
"\n",
|
||||
"If you have a large amount of data (e.g., more than a million vectors), we recommend setting up a more performant Milvus server on [Docker](https://milvus.io/docs/install_standalone-docker.md#Start-Milvus) or [Kubernetes](https://milvus.io/docs/install_cluster-milvusoperator.md).\n",
|
||||
"\n",
|
||||
"Milvus Standalone also supports different [indexes](https://milvus.io/docs/index.md?tab=floating), if you want to improve retrieval functionality.\n",
|
||||
"The Milvus server offers support for a variety of [indexes](https://milvus.io/docs/index.md?tab=floating). Leveraging these different indexes can significantly enhance the retrieval capabilities and expedite the retrieval process, tailored to your specific requirements.\n",
|
||||
"\n",
|
||||
"To launch the Docker container, run:"
|
||||
"As an illustration, consider the case of Milvus Standalone. To initiate the Docker container, you can run the following command:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -206,7 +206,7 @@
|
||||
"source": [
|
||||
"Note the change in the URI below. Once the instance is initialized, navigate to http://127.0.0.1:9091/webui to view the local web UI.\n",
|
||||
"\n",
|
||||
"Here is an example of how you would use a dense embedding + the Milvus BM25 built-in function to assemble a hybrid retrieval vector store instance:"
|
||||
"Here is an example of how you create your vector store instance with the Milvus database serivce:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -218,28 +218,25 @@
|
||||
"source": [
|
||||
"from langchain_milvus import BM25BuiltInFunction, Milvus\n",
|
||||
"\n",
|
||||
"dense_index_param = {\n",
|
||||
" \"metric_type\": \"COSINE\",\n",
|
||||
" \"index_type\": \"HNSW\",\n",
|
||||
"}\n",
|
||||
"sparse_index_param = {\n",
|
||||
" \"metric_type\": \"BM25\",\n",
|
||||
" \"index_type\": \"AUTOINDEX\",\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"URI = \"http://localhost:19530\"\n",
|
||||
"\n",
|
||||
"vectorstore = Milvus(\n",
|
||||
" embedding_function=embeddings,\n",
|
||||
" builtin_function=BM25BuiltInFunction(output_field_names=\"sparse\"),\n",
|
||||
" index_params=[dense_index_param, sparse_index_param],\n",
|
||||
" vector_field=[\"dense\", \"sparse\"],\n",
|
||||
" connection_args={\"uri\": URI, \"token\": \"root:Milvus\", \"db_name\": \"milvus_demo\"},\n",
|
||||
" index_params={\"index_type\": \"FLAT\", \"metric_type\": \"L2\"},\n",
|
||||
" consistency_level=\"Strong\",\n",
|
||||
" drop_old=False, # set to True if seeking to drop the collection with that name if it exists\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6d5a9670",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"> If you want to use Zilliz Cloud, the fully managed cloud service for Milvus, please adjust the uri and token, which correspond to the [Public Endpoint](https://docs.zilliz.com/docs/byoc/quick-start#free-cluster-details) and [Api key](https://docs.zilliz.com/docs/byoc/quick-start#free-cluster-details) in Zilliz Cloud."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "cae1a7d5",
|
||||
@@ -552,6 +549,110 @@
|
||||
"retriever.invoke(\"Stealing from the bank is a crime\", filter={\"source\": \"news\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8edb47106e1a46a883d545849b8ab81b",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"source": [
|
||||
"\n",
|
||||
"## Hybrid Search\n",
|
||||
"\n",
|
||||
"The most common hybrid search scenario is the dense + sparse hybrid search, where candidates are retrieved using both semantic vector similarity and precise keyword matching. Results from these methods are merged, reranked, and passed to an LLM to generate the final answer. This approach balances precision and semantic understanding, making it highly effective for diverse query scenarios.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"### Full-text search\n",
|
||||
"Since [Milvus 2.5](https://milvus.io/blog/introduce-milvus-2-5-full-text-search-powerful-metadata-filtering-and-more.md), full-text search is natively supported through the Sparse-BM25 approach, by representing the BM25 algorithm as sparse vectors. Milvus accepts raw text as input and automatically converts it into sparse vectors stored in a specified field, eliminating the need for manual sparse embedding generation.\n",
|
||||
"\n",
|
||||
"For full-text search Milvus VectorStore accepts a `builtin_function` parameter. Through this parameter, you can pass in an instance of the `BM25BuiltInFunction`. This is different than semantic search which usually passes dense embeddings to the `VectorStore`,\n",
|
||||
"\n",
|
||||
"Here is a simple example of hybrid search in Milvus with OpenAI dense embedding for semantic search and BM25 for full-text search:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "10185d26023b46108eb7d9f57d49d2b3",
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_milvus import BM25BuiltInFunction, Milvus\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"vectorstore = Milvus.from_documents(\n",
|
||||
" documents=documents,\n",
|
||||
" embedding=OpenAIEmbeddings(),\n",
|
||||
" builtin_function=BM25BuiltInFunction(),\n",
|
||||
" # `dense` is for OpenAI embeddings, `sparse` is the output field of BM25 function\n",
|
||||
" vector_field=[\"dense\", \"sparse\"],\n",
|
||||
" connection_args={\n",
|
||||
" \"uri\": URI,\n",
|
||||
" },\n",
|
||||
" consistency_level=\"Strong\",\n",
|
||||
" drop_old=True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8763a12b2bbd4a93a75aff182afb95dc",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"source": [
|
||||
"> - When you use `BM25BuiltInFunction`, please note that the full-text search is available in Milvus Standalone and Milvus Distributed, but not in Milvus Lite, although it is on the roadmap for future inclusion. It will also be available in Zilliz Cloud (fully-managed Milvus) soon. Please reach out to support@zilliz.com for more information.\n",
|
||||
"\n",
|
||||
"In the code above, we define an instance of `BM25BuiltInFunction` and pass it to the `Milvus` object. `BM25BuiltInFunction` is a lightweight wrapper class for [`Function`](https://milvus.io/docs/manage-collections.md#Function) in Milvus. We can use it with `OpenAIEmbeddings` to initialize a dense + sparse hybrid search Milvus vector store instance.\n",
|
||||
"\n",
|
||||
"`BM25BuiltInFunction` does not require the client to pass corpus or training, all are automatically processed at the Milvus server's end, so users do not need to care about any vocabulary and corpus. In addition, users can also customize the [analyzer](https://milvus.io/docs/analyzer-overview.md#Analyzer-Overview) to implement the custom text processing in the BM25."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7623eae2785240b9bd12b16a66d81610",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"source": [
|
||||
"### Rerank the candidates\n",
|
||||
"After the first stage of retrieval, we need to rerank the candidates to get a better result. You can refer to the [Reranking](https://milvus.io/docs/reranking.md#Reranking) for more information.\n",
|
||||
"\n",
|
||||
"Here is an example for weighted reranking:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7cdc8c89c7104fffa095e18ddfef8986",
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"What are the novels Lila has written and what are their contents?\"\n",
|
||||
"\n",
|
||||
"vectorstore.similarity_search(\n",
|
||||
" query, k=1, ranker_type=\"weighted\", ranker_params={\"weights\": [0.6, 0.4]}\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b3965036",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For more information about Full-text search and Hybrid search, please refer to the [Using Full-Text Search with LangChain and Milvus](https://milvus.io/docs/full_text_search_with_langchain.md) and [Hybrid Retrieval with LangChain and Milvus](https://milvus.io/docs/milvus_hybrid_search_retriever.md)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8ac953f1",
|
||||
@@ -726,7 +827,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
"version": "3.10.0"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -113,22 +113,19 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": null,
|
||||
"id": "979a65bd-742f-4b0d-be1e-c0baae245ec6",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_postgres import PGVector\n",
|
||||
"from langchain_postgres.vectorstores import PGVector\n",
|
||||
"\n",
|
||||
"# See docker command above to launch a postgres instance with pgvector enabled.\n",
|
||||
"connection = \"postgresql+psycopg://langchain:langchain@localhost:6024/langchain\" # Uses psycopg3!\n",
|
||||
"collection_name = \"my_docs\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"vector_store = PGVector(\n",
|
||||
" embeddings=embeddings,\n",
|
||||
" collection_name=collection_name,\n",
|
||||
@@ -169,6 +166,8 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.documents import Document\n",
|
||||
"\n",
|
||||
"docs = [\n",
|
||||
" Document(\n",
|
||||
" page_content=\"there are cats in the pond\",\n",
|
||||
|
||||
@@ -26,7 +26,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-pinecone pinecone-notebooks"
|
||||
"pip install -qU langchain-pinecone pinecone-notebooks"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -56,7 +56,6 @@
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"import time\n",
|
||||
"\n",
|
||||
"from pinecone import Pinecone, ServerlessSpec\n",
|
||||
"\n",
|
||||
|
||||
@@ -8,11 +8,9 @@
|
||||
"source": [
|
||||
"# Qdrant\n",
|
||||
"\n",
|
||||
">[Qdrant](https://qdrant.tech/documentation/) (read: quadrant ) is a vector similarity search engine. It provides a production-ready service with a convenient API to store, search, and manage vectors with additional payload and extended filtering support. It makes it useful for all sorts of neural network or semantic-based matching, faceted search, and other applications.\n",
|
||||
">[Qdrant](https://qdrant.tech/documentation/) (read: quadrant) is a vector similarity search engine. It provides a production-ready service with a convenient API to store, search, and manage vectors with additional payload and extended filtering support. It makes it useful for all sorts of neural network or semantic-based matching, faceted search, and other applications.\n",
|
||||
"\n",
|
||||
"This documentation demonstrates how to use Qdrant with Langchain for dense/sparse and hybrid retrieval.\n",
|
||||
"\n",
|
||||
"> This page documents the `QdrantVectorStore` class that supports multiple retrieval modes via Qdrant's new [Query API](https://qdrant.tech/blog/qdrant-1.10.x/). It requires you to run Qdrant v1.10.0 or above.\n",
|
||||
"This documentation demonstrates how to use Qdrant with LangChain for dense (i.e., embedding-based), sparse (i.e., text search) and hybrid retrieval. The `QdrantVectorStore` class supports multiple retrieval modes via Qdrant's new [Query API](https://qdrant.tech/blog/qdrant-1.10.x/). It requires you to run Qdrant v1.10.0 or above.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
@@ -22,7 +20,7 @@
|
||||
"- Docker deployments\n",
|
||||
"- Qdrant Cloud\n",
|
||||
"\n",
|
||||
"See the [installation instructions](https://qdrant.tech/documentation/install/)."
|
||||
"Please see the installation instructions [here](https://qdrant.tech/documentation/install/)."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -34,7 +32,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-qdrant"
|
||||
"pip install -qU langchain-qdrant"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -70,11 +68,11 @@
|
||||
"\n",
|
||||
"### Local mode\n",
|
||||
"\n",
|
||||
"Python client allows you to run the same code in local mode without running the Qdrant server. That's great for testing things out and debugging or storing just a small amount of vectors. The embeddings might be fully kept in memory or persisted on disk.\n",
|
||||
"The Python client provides the option to run the code in local mode without running the Qdrant server. This is great for testing things out and debugging or storing just a small amount of vectors. The embeddings can be kept fully in-memory or persisted on-disk.\n",
|
||||
"\n",
|
||||
"#### In-memory\n",
|
||||
"\n",
|
||||
"For some testing scenarios and quick experiments, you may prefer to keep all the data in memory only, so it gets lost when the client is destroyed - usually at the end of your script/notebook.\n",
|
||||
"For some testing scenarios and quick experiments, you may prefer to keep all the data in-memory only, so it gets removed when the client is destroyed - usually at the end of your script/notebook.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"import EmbeddingTabs from \"@theme/EmbeddingTabs\";\n",
|
||||
@@ -135,7 +133,7 @@
|
||||
"source": [
|
||||
"#### On-disk storage\n",
|
||||
"\n",
|
||||
"Local mode, without using the Qdrant server, may also store your vectors on disk so they persist between runs."
|
||||
"Local mode, without using the Qdrant server, may also store your vectors on-disk so they persist between runs."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -173,7 +171,7 @@
|
||||
"source": [
|
||||
"### On-premise server deployment\n",
|
||||
"\n",
|
||||
"No matter if you choose to launch Qdrant locally with [a Docker container](https://qdrant.tech/documentation/install/), or select a Kubernetes deployment with [the official Helm chart](https://github.com/qdrant/qdrant-helm), the way you're going to connect to such an instance will be identical. You'll need to provide a URL pointing to the service."
|
||||
"No matter if you choose to launch Qdrant locally with [a Docker container](https://qdrant.tech/documentation/install/) or select a Kubernetes deployment with [the official Helm chart](https://github.com/qdrant/qdrant-helm), the way you're going to connect to such an instance will be identical. You'll need to provide a URL pointing to the service."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -280,42 +278,22 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": null,
|
||||
"id": "7697a362",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"['c04134c3-273d-4766-949a-eee46052ad32',\n",
|
||||
" '9e6ba50c-794f-4b88-94e5-411f15052a02',\n",
|
||||
" 'd3202666-6f2b-4186-ac43-e35389de8166',\n",
|
||||
" '50d8d6ee-69bf-4173-a6a2-b254e9928965',\n",
|
||||
" 'bd2eae02-74b5-43ec-9fcf-09e9d9db6fd3',\n",
|
||||
" '6dae6b37-826d-4f14-8376-da4603b35de3',\n",
|
||||
" 'b0964ab5-5a14-47b4-a983-37fa5c5bd154',\n",
|
||||
" '91ed6c56-fe53-49e2-8199-c3bb3c33c3eb',\n",
|
||||
" '42a580cb-7469-4324-9927-0febab57ce92',\n",
|
||||
" 'ff774e5c-f158-4d12-94e2-0a0162b22f27']"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from uuid import uuid4\n",
|
||||
"\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"\n",
|
||||
"document_1 = Document(\n",
|
||||
" page_content=\"I had chocalate chip pancakes and scrambled eggs for breakfast this morning.\",\n",
|
||||
" page_content=\"I had chocolate chip pancakes and scrambled eggs for breakfast this morning.\",\n",
|
||||
" metadata={\"source\": \"tweet\"},\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"document_2 = Document(\n",
|
||||
" page_content=\"The weather forecast for tomorrow is cloudy and overcast, with a high of 62 degrees.\",\n",
|
||||
" page_content=\"The weather forecast for tomorrow is cloudy and overcast, with a high of 62 degrees Fahrenheit.\",\n",
|
||||
" metadata={\"source\": \"news\"},\n",
|
||||
")\n",
|
||||
"\n",
|
||||
@@ -371,8 +349,16 @@
|
||||
" document_9,\n",
|
||||
" document_10,\n",
|
||||
"]\n",
|
||||
"uuids = [str(uuid4()) for _ in range(len(documents))]\n",
|
||||
"\n",
|
||||
"uuids = [str(uuid4()) for _ in range(len(documents))]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "413c3d9a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"vector_store.add_documents(documents=documents, ids=uuids)"
|
||||
]
|
||||
},
|
||||
@@ -418,11 +404,11 @@
|
||||
"source": [
|
||||
"## Query vector store\n",
|
||||
"\n",
|
||||
"Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent. \n",
|
||||
"Once your vector store has been created and the relevant documents have been added, you will most likely wish to query it during the running of your chain or agent. \n",
|
||||
"\n",
|
||||
"### Query directly\n",
|
||||
"\n",
|
||||
"The simplest scenario for using Qdrant vector store is to perform a similarity search. Under the hood, our query will be encoded into vector embeddings and used to find similar documents in Qdrant collection."
|
||||
"The simplest scenario for using the Qdrant vector store is to perform a similarity search. Under the hood, our query will be encoded into vector embeddings and used to find similar documents in a Qdrant collection."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -459,17 +445,17 @@
|
||||
"id": "79bcb0ce",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"`QdrantVectorStore` supports 3 modes for similarity searches. They can be configured using the `retrieval_mode` parameter when setting up the class.\n",
|
||||
"`QdrantVectorStore` supports 3 modes for similarity searches. They can be configured using the `retrieval_mode` parameter.\n",
|
||||
"\n",
|
||||
"- Dense Vector Search(Default)\n",
|
||||
"- Dense Vector Search (default)\n",
|
||||
"- Sparse Vector Search\n",
|
||||
"- Hybrid Search\n",
|
||||
"\n",
|
||||
"### Dense Vector Search\n",
|
||||
"\n",
|
||||
"To search with only dense vectors,\n",
|
||||
"Dense vector search involves calculating similarity via vector-based embeddings. To search with only dense vectors:\n",
|
||||
"\n",
|
||||
"- The `retrieval_mode` parameter should be set to `RetrievalMode.DENSE`(default).\n",
|
||||
"- The `retrieval_mode` parameter should be set to `RetrievalMode.DENSE`. This is the default behavior.\n",
|
||||
"- A [dense embeddings](https://python.langchain.com/docs/integrations/text_embedding/) value should be provided to the `embedding` parameter."
|
||||
]
|
||||
},
|
||||
@@ -480,18 +466,31 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_qdrant import RetrievalMode\n",
|
||||
"from langchain_qdrant import QdrantVectorStore, RetrievalMode\n",
|
||||
"from qdrant_client import QdrantClient\n",
|
||||
"from qdrant_client.http.models import Distance, VectorParams\n",
|
||||
"\n",
|
||||
"qdrant = QdrantVectorStore.from_documents(\n",
|
||||
" docs,\n",
|
||||
" embedding=embeddings,\n",
|
||||
" location=\":memory:\",\n",
|
||||
"# Create a Qdrant client for local storage\n",
|
||||
"client = QdrantClient(path=\"/tmp/langchain_qdrant\")\n",
|
||||
"\n",
|
||||
"# Create a collection with dense vectors\n",
|
||||
"client.create_collection(\n",
|
||||
" collection_name=\"my_documents\",\n",
|
||||
" vectors_config=VectorParams(size=3072, distance=Distance.COSINE),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"qdrant = QdrantVectorStore(\n",
|
||||
" client=client,\n",
|
||||
" collection_name=\"my_documents\",\n",
|
||||
" embedding=embeddings,\n",
|
||||
" retrieval_mode=RetrievalMode.DENSE,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"found_docs = qdrant.similarity_search(query)"
|
||||
"qdrant.add_documents(documents=documents, ids=uuids)\n",
|
||||
"\n",
|
||||
"query = \"How much money did the robbers steal?\"\n",
|
||||
"found_docs = qdrant.similarity_search(query)\n",
|
||||
"found_docs"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -501,10 +500,10 @@
|
||||
"source": [
|
||||
"### Sparse Vector Search\n",
|
||||
"\n",
|
||||
"To search with only sparse vectors,\n",
|
||||
"To search with only sparse vectors:\n",
|
||||
"\n",
|
||||
"- The `retrieval_mode` parameter should be set to `RetrievalMode.SPARSE`.\n",
|
||||
"- An implementation of the [`SparseEmbeddings`](https://github.com/langchain-ai/langchain/blob/master/libs/partners/qdrant/langchain_qdrant/sparse_embeddings.py) interface using any sparse embeddings provider has to be provided as value to the `sparse_embedding` parameter.\n",
|
||||
"- An implementation of the [`SparseEmbeddings`](https://github.com/langchain-ai/langchain/blob/master/libs/partners/qdrant/langchain_qdrant/sparse_embeddings.py) interface using any sparse embeddings provider has to be provided as a value to the `sparse_embedding` parameter.\n",
|
||||
"\n",
|
||||
"The `langchain-qdrant` package provides a [FastEmbed](https://github.com/qdrant/fastembed) based implementation out of the box.\n",
|
||||
"\n",
|
||||
@@ -518,7 +517,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install fastembed"
|
||||
"%pip install -qU fastembed"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -528,20 +527,37 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_qdrant import FastEmbedSparse, RetrievalMode\n",
|
||||
"from langchain_qdrant import FastEmbedSparse, QdrantVectorStore, RetrievalMode\n",
|
||||
"from qdrant_client import QdrantClient, models\n",
|
||||
"from qdrant_client.http.models import Distance, SparseVectorParams, VectorParams\n",
|
||||
"\n",
|
||||
"sparse_embeddings = FastEmbedSparse(model_name=\"Qdrant/bm25\")\n",
|
||||
"\n",
|
||||
"qdrant = QdrantVectorStore.from_documents(\n",
|
||||
" docs,\n",
|
||||
" sparse_embedding=sparse_embeddings,\n",
|
||||
" location=\":memory:\",\n",
|
||||
"# Create a Qdrant client for local storage\n",
|
||||
"client = QdrantClient(path=\"/tmp/langchain_qdrant\")\n",
|
||||
"\n",
|
||||
"# Create a collection with sparse vectors\n",
|
||||
"client.create_collection(\n",
|
||||
" collection_name=\"my_documents\",\n",
|
||||
" retrieval_mode=RetrievalMode.SPARSE,\n",
|
||||
" vectors_config={\"dense\": VectorParams(size=3072, distance=Distance.COSINE)},\n",
|
||||
" sparse_vectors_config={\n",
|
||||
" \"sparse\": SparseVectorParams(index=models.SparseIndexParams(on_disk=False))\n",
|
||||
" },\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"found_docs = qdrant.similarity_search(query)"
|
||||
"qdrant = QdrantVectorStore(\n",
|
||||
" client=client,\n",
|
||||
" collection_name=\"my_documents\",\n",
|
||||
" sparse_embedding=sparse_embeddings,\n",
|
||||
" retrieval_mode=RetrievalMode.SPARSE,\n",
|
||||
" sparse_vector_name=\"sparse\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"qdrant.add_documents(documents=documents, ids=uuids)\n",
|
||||
"\n",
|
||||
"query = \"How much money did the robbers steal?\"\n",
|
||||
"found_docs = qdrant.similarity_search(query)\n",
|
||||
"found_docs"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -555,9 +571,9 @@
|
||||
"\n",
|
||||
"- The `retrieval_mode` parameter should be set to `RetrievalMode.HYBRID`.\n",
|
||||
"- A [dense embeddings](https://python.langchain.com/docs/integrations/text_embedding/) value should be provided to the `embedding` parameter.\n",
|
||||
"- An implementation of the [`SparseEmbeddings`](https://github.com/langchain-ai/langchain/blob/master/libs/partners/qdrant/langchain_qdrant/sparse_embeddings.py) interface using any sparse embeddings provider has to be provided as value to the `sparse_embedding` parameter.\n",
|
||||
"- An implementation of the [`SparseEmbeddings`](https://github.com/langchain-ai/langchain/blob/master/libs/partners/qdrant/langchain_qdrant/sparse_embeddings.py) interface using any sparse embeddings provider has to be provided as a value to the `sparse_embedding` parameter.\n",
|
||||
"\n",
|
||||
"Note that if you've added documents with the `HYBRID` mode, you can switch to any retrieval mode when searching. Since both the dense and sparse vectors are available in the collection."
|
||||
"Note that if you've added documents with the `HYBRID` mode, you can switch to any retrieval mode when searching, since both the dense and sparse vectors are available in the collection."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -567,21 +583,39 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_qdrant import FastEmbedSparse, RetrievalMode\n",
|
||||
"from langchain_qdrant import FastEmbedSparse, QdrantVectorStore, RetrievalMode\n",
|
||||
"from qdrant_client import QdrantClient, models\n",
|
||||
"from qdrant_client.http.models import Distance, SparseVectorParams, VectorParams\n",
|
||||
"\n",
|
||||
"sparse_embeddings = FastEmbedSparse(model_name=\"Qdrant/bm25\")\n",
|
||||
"\n",
|
||||
"qdrant = QdrantVectorStore.from_documents(\n",
|
||||
" docs,\n",
|
||||
" embedding=embeddings,\n",
|
||||
" sparse_embedding=sparse_embeddings,\n",
|
||||
" location=\":memory:\",\n",
|
||||
"# Create a Qdrant client for local storage\n",
|
||||
"client = QdrantClient(path=\"/tmp/langchain_qdrant\")\n",
|
||||
"\n",
|
||||
"# Create a collection with both dense and sparse vectors\n",
|
||||
"client.create_collection(\n",
|
||||
" collection_name=\"my_documents\",\n",
|
||||
" retrieval_mode=RetrievalMode.HYBRID,\n",
|
||||
" vectors_config={\"dense\": VectorParams(size=3072, distance=Distance.COSINE)},\n",
|
||||
" sparse_vectors_config={\n",
|
||||
" \"sparse\": SparseVectorParams(index=models.SparseIndexParams(on_disk=False))\n",
|
||||
" },\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"found_docs = qdrant.similarity_search(query)"
|
||||
"qdrant = QdrantVectorStore(\n",
|
||||
" client=client,\n",
|
||||
" collection_name=\"my_documents\",\n",
|
||||
" embedding=embeddings,\n",
|
||||
" sparse_embedding=sparse_embeddings,\n",
|
||||
" retrieval_mode=RetrievalMode.HYBRID,\n",
|
||||
" vector_name=\"dense\",\n",
|
||||
" sparse_vector_name=\"sparse\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"qdrant.add_documents(documents=documents, ids=uuids)\n",
|
||||
"\n",
|
||||
"query = \"How much money did the robbers steal?\"\n",
|
||||
"found_docs = qdrant.similarity_search(query)\n",
|
||||
"found_docs"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -728,7 +762,7 @@
|
||||
"source": [
|
||||
"## Customizing Qdrant\n",
|
||||
"\n",
|
||||
"There are options to use an existing Qdrant collection within your Langchain application. In such cases, you may need to define how to map Qdrant point into the Langchain `Document`.\n",
|
||||
"There are options to use an existing Qdrant collection within your LangChain application. In such cases, you may need to define how to map Qdrant point into the LangChain `Document`.\n",
|
||||
"\n",
|
||||
"### Named vectors\n",
|
||||
"\n",
|
||||
|
||||
@@ -59,7 +59,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's load the OpenAI key to the environemnt. If you don't have one you can create an OpenAI account and create a key on this [page](https://platform.openai.com/account/api-keys)."
|
||||
"Let's load the OpenAI key to the environment. If you don't have one you can create an OpenAI account and create a key on this [page](https://platform.openai.com/account/api-keys)."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -59,7 +59,10 @@ def check_header_order(path: Path) -> None:
|
||||
if doc_dir not in INFO_BY_DIR:
|
||||
# Skip if not a directory we care about
|
||||
return
|
||||
headers = _get_headers(doc_dir)
|
||||
if "toolkit" in path.name:
|
||||
headers = _get_headers("toolkits")
|
||||
else:
|
||||
headers = _get_headers(doc_dir)
|
||||
issue_number = INFO_BY_DIR[doc_dir].get("issue_number", "nonexistent")
|
||||
|
||||
print(f"Checking {doc_dir} page {path}")
|
||||
|
||||
@@ -147,6 +147,11 @@ WEBBROWSING_TOOL_FEAT_TABLE = {
|
||||
"interactions": True,
|
||||
"pricing": "40 free requests/day",
|
||||
},
|
||||
"AgentQL Toolkit": {
|
||||
"link": "/docs/integrations/tools/agentql",
|
||||
"interactions": True,
|
||||
"pricing": "Free trial, with pay-as-you-go and flat rate plans after",
|
||||
},
|
||||
}
|
||||
|
||||
DATABASE_TOOL_FEAT_TABLE = {
|
||||
|
||||
@@ -99,7 +99,7 @@ export const CustomDropdown = ({ selectedOption, options, onSelect, modelType })
|
||||
* @param {ChatModelTabsProps} props - Component props.
|
||||
*/
|
||||
export default function ChatModelTabs(props) {
|
||||
const [selectedModel, setSelectedModel] = useState("groq");
|
||||
const [selectedModel, setSelectedModel] = useState("openai");
|
||||
const {
|
||||
overrideParams,
|
||||
customVarName,
|
||||
@@ -108,13 +108,6 @@ export default function ChatModelTabs(props) {
|
||||
const llmVarName = customVarName ?? "model";
|
||||
|
||||
const tabItems = [
|
||||
{
|
||||
value: "groq",
|
||||
label: "Groq",
|
||||
model: "llama3-8b-8192",
|
||||
apiKeyName: "GROQ_API_KEY",
|
||||
packageName: "langchain[groq]",
|
||||
},
|
||||
{
|
||||
value: "openai",
|
||||
label: "OpenAI",
|
||||
@@ -156,6 +149,13 @@ ${llmVarName} = AzureChatOpenAI(
|
||||
apiKeyText: "# Ensure your AWS credentials are configured",
|
||||
packageName: "langchain[aws]",
|
||||
},
|
||||
{
|
||||
value: "groq",
|
||||
label: "Groq",
|
||||
model: "llama3-8b-8192",
|
||||
apiKeyName: "GROQ_API_KEY",
|
||||
packageName: "langchain[groq]",
|
||||
},
|
||||
{
|
||||
value: "cohere",
|
||||
label: "Cohere",
|
||||
@@ -211,6 +211,13 @@ ${llmVarName} = ChatWatsonx(
|
||||
apiKeyName: "DATABRICKS_TOKEN",
|
||||
packageName: "databricks-langchain",
|
||||
},
|
||||
{
|
||||
value: "xai",
|
||||
label: "xAI",
|
||||
model: "grok-2",
|
||||
apiKeyName: "XAI_API_KEY",
|
||||
packageName: "langchain-xai",
|
||||
},
|
||||
].map((item) => ({
|
||||
...item,
|
||||
...overrideParams?.[item.value],
|
||||
|
||||
@@ -819,6 +819,13 @@ const FEATURE_TABLES = {
|
||||
source: "Platform for running and scaling headless browsers, can be used to scrape/crawl any site",
|
||||
api: "API",
|
||||
apiLink: "https://python.langchain.com/docs/integrations/document_loaders/hyperbrowser/"
|
||||
},
|
||||
{
|
||||
name: "AgentQL",
|
||||
link: "agentql",
|
||||
source: "Web interaction and structured data extraction from any web page using an AgentQL query or a Natural Language prompt",
|
||||
api: "API",
|
||||
apiLink: "https://python.langchain.com/docs/integrations/document_loaders/agentql/"
|
||||
}
|
||||
]
|
||||
},
|
||||
|
||||
25
docs/static/img/logo-dark.svg
vendored
Normal file
25
docs/static/img/logo-dark.svg
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<svg id="Layer_1" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 1584.81 250">
|
||||
<defs>
|
||||
<style>
|
||||
.cls-1 {
|
||||
fill: #1c3c3c;
|
||||
stroke-width: 0px;
|
||||
}
|
||||
</style>
|
||||
</defs>
|
||||
<g id="LanChain-logo">
|
||||
<g id="LangChain-logotype">
|
||||
<polygon class="cls-1" points="596.33 49.07 596.33 200.67 700.76 200.67 700.76 177.78 620.04 177.78 620.04 49.07 596.33 49.07"/>
|
||||
<path class="cls-1" d="M1126.83,49.07c-20.53,0-37.95,7.4-50.38,21.41-12.32,13.88-18.82,33.36-18.82,56.33,0,47.23,27.25,77.75,69.41,77.75,29.71,0,52.71-15.54,61.54-41.56l2.14-6.31-23.53-8.94-2.17,7.03c-5.26,17.01-18.75,26.38-37.99,26.38-27.48,0-44.55-20.82-44.55-54.34s17.23-54.34,44.97-54.34c19.23,0,30.31,7.54,35.95,24.44l2.46,7.37,22.91-10.75-2.1-5.9c-8.96-25.22-29.65-38.56-59.85-38.56Z"/>
|
||||
<path class="cls-1" d="M756.43,85.05c-22.76,0-39.78,10.67-46.69,29.27-.44,1.19-1.77,4.78-1.77,4.78l19.51,12.62,2.65-6.91c4.52-11.78,12.88-17.27,26.3-17.27s21.1,6.51,20.96,19.33c0,.52-.04,2.09-.04,2.09,0,0-17.76,2.88-25.08,4.43-31.23,6.6-44.31,18.52-44.31,38.02,0,10.39,5.77,21.64,16.3,27.95,6.32,3.78,14.57,5.21,23.68,5.21,5.99,0,11.81-.89,17.2-2.53,12.25-4.07,15.67-12.07,15.67-12.07v10.46h20.29v-74.78c0-25.42-16.7-40.6-44.67-40.6ZM777.46,164.85c0,7.86-8.56,18.93-28.5,18.93-5.63,0-9.62-1.49-12.28-3.71-3.56-2.97-4.73-7.24-4.24-11.01.21-1.64,1.2-5.17,4.87-8.23,3.75-3.13,10.38-5.37,20.62-7.6,8.42-1.83,19.54-3.85,19.54-3.85v15.48Z"/>
|
||||
<path class="cls-1" d="M876.11,85.04c-2.82,0-5.57.2-8.24.57-18.17,2.73-23.49,11.96-23.49,11.96l.02-9.31h-22.74s0,112.19,0,112.19h23.71v-62.18c0-21.13,15.41-30.75,29.73-30.75,15.48,0,23,8.32,23,25.45v67.48h23.71v-70.74c0-27.56-17.51-44.67-45.69-44.67Z"/>
|
||||
<path class="cls-1" d="M1539.12,85.04c-2.82,0-5.57.2-8.24.57-18.17,2.73-23.49,11.96-23.49,11.96v-9.32h-22.72v112.2h23.71v-62.18c0-21.13,15.41-30.75,29.73-30.75,15.48,0,23,8.32,23,25.45v67.48h23.71v-70.74c0-27.56-17.51-44.67-45.69-44.67Z"/>
|
||||
<path class="cls-1" d="M1020.76,88.26v11.55s-5.81-14.77-32.24-14.77c-32.84,0-53.24,22.66-53.24,59.15,0,20.59,6.58,36.8,18.19,47.04,9.03,7.96,21.09,12.04,35.45,12.32,9.99.19,16.46-2.53,20.5-5.1,7.76-4.94,10.64-9.63,10.64-9.63,0,0-.33,3.67-.93,8.64-.43,3.6-1.24,6.13-1.24,6.13h0c-3.61,12.85-14.17,20.28-29.57,20.28s-24.73-5.07-26.58-15.06l-23.05,6.88c3.98,19.2,22,30.66,48.2,30.66,17.81,0,31.77-4.84,41.5-14.4,9.81-9.64,14.79-23.53,14.79-41.29v-102.41h-22.42ZM1019.26,145.21c0,22.44-10.96,35.84-29.32,35.84-19.67,0-30.95-13.44-30.95-36.86s11.28-36.66,30.95-36.66c17.92,0,29.15,13.34,29.32,34.82v2.86Z"/>
|
||||
<path class="cls-1" d="M1259.01,85.04c-2.6,0-5.13.17-7.59.49-17.88,2.79-23.14,11.9-23.14,11.9v-2.67h-.01s0-45.69,0-45.69h-23.71v151.39h23.71v-62.18c0-21.27,15.41-30.95,29.73-30.95,15.48,0,23,8.32,23,25.45v67.68h23.71v-70.94c0-27.01-17.94-44.47-45.69-44.47Z"/>
|
||||
<circle class="cls-1" cx="1450.93" cy="64.47" r="15.37"/>
|
||||
<path class="cls-1" d="M1439.14,88.2v56.94h0c-6.75-5.56-14.6-9.75-23.5-12.26v-7.23c0-25.42-16.7-40.6-44.67-40.6-22.76,0-39.78,10.67-46.69,29.27-.44,1.19-1.77,4.78-1.77,4.78l19.51,12.62,2.65-6.91c4.52-11.78,12.88-17.27,26.3-17.27s21.1,6.51,20.96,19.33c0,.08,0,1.15,0,2.86-10.04-.28-19.38.69-27.77,2.66,0,0,0,0,0,0-11.06,2.5-31.6,8.85-38.94,25.36-.05.11-1.13,2.96-1.13,2.96-1.06,3.28-1.59,6.84-1.59,10.7,0,10.39,5.77,21.64,16.3,27.95,6.32,3.78,14.57,5.21,23.68,5.21,5.88,0,11.6-.86,16.91-2.44,12.49-4.04,15.96-12.16,15.96-12.16v10.47h20.29v-34.27c-5.7-3.56-14.26-5.66-23.65-5.64,0,2.65,0,4.33,0,4.33,0,7.86-8.56,18.93-28.5,18.93-5.63,0-9.62-1.49-12.28-3.71-3.56-2.97-4.73-7.24-4.24-11.01.21-1.64,1.2-5.17,4.87-8.23l-.04-.11c8.42-6.89,24.97-9.64,40.17-9.04v.03c12.94.47,22.62,3.01,29.53,7.77,1.88,1.19,3.65,2.52,5.28,3.98,6.94,6.23,9.73,13.9,10.93,18.38,1.95,7.31,1.43,18.57,1.43,18.57h23.59v-112.2h-23.59Z"/>
|
||||
</g>
|
||||
<path id="LangChain-symbol" class="cls-1" d="M393.52,75.2c9.66,9.66,9.66,25.38,0,35.04l-21.64,21.29-.22-1.22c-1.58-8.75-5.74-16.69-12.02-22.97-4.73-4.72-10.32-8.21-16.62-10.37-3.91,3.93-6.06,9.08-6.06,14.5,0,1.1.1,2.24.3,3.38,3.47,1.25,6.54,3.18,9.12,5.76,9.66,9.66,9.66,25.38,0,35.04l-18.84,18.84c-4.83,4.83-11.17,7.24-17.52,7.24s-12.69-2.41-17.52-7.24c-9.66-9.66-9.66-25.38,0-35.04l21.64-21.28.22,1.22c1.57,8.73,5.73,16.67,12.03,22.96,4.74,4.74,9.99,7.89,16.28,10.04l1.16-1.16c3.52-3.52,5.45-8.2,5.45-13.19,0-1.11-.1-2.22-.29-3.31-3.63-1.2-6.62-2.91-9.34-5.63-3.92-3.92-6.36-8.93-7.04-14.48-.05-.4-.08-.79-.12-1.19-.54-7.23,2.07-14.29,7.16-19.37l18.84-18.84c4.67-4.67,10.89-7.25,17.52-7.25s12.85,2.57,17.52,7.25ZM491.9,125c0,68.93-56.08,125-125,125H125C56.08,250,0,193.93,0,125S56.08,0,125,0h241.9c68.93,0,125,56.08,125,125ZM240.9,187.69c1.97-2.39-7.13-9.12-8.99-11.59-3.78-4.1-3.8-10-6.35-14.79-6.24-14.46-13.41-28.81-23.44-41.05-10.6-13.39-23.68-24.47-35.17-37.04-8.53-8.77-10.81-21.26-18.34-30.69-10.38-15.33-43.2-19.51-48.01,2.14.02.68-.19,1.11-.78,1.54-2.66,1.93-5.03,4.14-7.02,6.81-4.87,6.78-5.62,18.28.46,24.37.2-3.21.31-6.24,2.85-8.54,4.7,4.03,11.8,5.46,17.25,2.45,12.04,17.19,9.04,40.97,18.6,59.49,2.64,4.38,5.3,8.85,8.69,12.69,2.75,4.28,12.25,9.33,12.81,13.29.1,6.8-.7,14.23,3.76,19.92,2.1,4.26-3.06,8.54-7.22,8.01-5.4.74-11.99-3.63-16.72-.94-1.67,1.81-4.94-.19-6.38,2.32-.5,1.3-3.2,3.13-1.59,4.38,1.79-1.36,3.45-2.78,5.86-1.97-.36,1.96,1.19,2.24,2.42,2.81-.04,1.33-.82,2.69.2,3.82,1.19-1.2,1.9-2.9,3.79-3.4,6.28,8.37,12.67-8.47,26.26-.89-2.76-.14-5.21.21-7.07,2.48-.46.51-.85,1.11-.04,1.77,7.33-4.73,7.29,1.62,12.05-.33,3.66-1.91,7.3-4.3,11.65-3.62-4.23,1.22-4.4,4.62-6.88,7.49-.42.44-.62.94-.13,1.67,8.78-.74,9.5-3.66,16.59-7.24,5.29-3.23,10.56,4.6,15.14.14,1.01-.97,2.39-.64,3.64-.77-1.6-8.53-19.19,1.56-18.91-9.88,5.66-3.85,4.36-11.22,4.74-17.17,6.51,3.61,13.75,5.71,20.13,9.16,3.22,5.2,8.27,12.07,15,11.62.18-.52.34-.98.53-1.51,2.04.35,4.66,1.7,5.78-.88,3.05,3.19,7.53,3.03,11.52,2.2
1,2.95-2.4-5.55-5.82-6.69-8.29ZM419.51,92.72c0-11.64-4.52-22.57-12.73-30.78-8.21-8.21-19.14-12.73-30.79-12.73s-22.58,4.52-30.79,12.73l-18.84,18.84c-4.4,4.4-7.74,9.57-9.93,15.36l-.13.33-.34.1c-6.84,2.11-12.87,5.73-17.92,10.78l-18.84,18.84c-16.97,16.98-16.97,44.6,0,61.57,8.21,8.21,19.14,12.73,30.78,12.73h0c11.64,0,22.58-4.52,30.79-12.73l18.84-18.84c4.38-4.38,7.7-9.53,9.89-15.31l.13-.33.34-.11c6.72-2.06,12.92-5.8,17.95-10.82l18.84-18.84c8.21-8.21,12.73-19.14,12.73-30.79ZM172.38,173.6c-1.62,6.32-2.15,17.09-10.37,17.4-.68,3.65,2.53,5.02,5.44,3.85,2.89-1.33,4.26,1.05,5.23,3.42,4.46.65,11.06-1.49,11.31-6.77-6.66-3.84-8.72-11.14-11.62-17.9Z"/>
|
||||
</g>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 6.4 KiB |
25
docs/static/img/logo-light.svg
vendored
Normal file
25
docs/static/img/logo-light.svg
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<svg id="Layer_1" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 1584.81 250">
|
||||
<defs>
|
||||
<style>
|
||||
.cls-1 {
|
||||
fill: #fff;
|
||||
stroke-width: 0px;
|
||||
}
|
||||
</style>
|
||||
</defs>
|
||||
<g id="LanChain-logo">
|
||||
<g id="LangChain-logotype">
|
||||
<polygon class="cls-1" points="596.33 49.07 596.33 200.67 700.76 200.67 700.76 177.78 620.04 177.78 620.04 49.07 596.33 49.07"/>
|
||||
<path class="cls-1" d="M1126.83,49.07c-20.53,0-37.95,7.4-50.38,21.41-12.32,13.88-18.82,33.36-18.82,56.33,0,47.23,27.25,77.75,69.41,77.75,29.71,0,52.71-15.54,61.54-41.56l2.14-6.31-23.53-8.94-2.17,7.03c-5.26,17.01-18.75,26.38-37.99,26.38-27.48,0-44.55-20.82-44.55-54.34s17.23-54.34,44.97-54.34c19.23,0,30.31,7.54,35.95,24.44l2.46,7.37,22.91-10.75-2.1-5.9c-8.96-25.22-29.65-38.56-59.85-38.56Z"/>
|
||||
<path class="cls-1" d="M756.43,85.05c-22.76,0-39.78,10.67-46.69,29.27-.44,1.19-1.77,4.78-1.77,4.78l19.51,12.62,2.65-6.91c4.52-11.78,12.88-17.27,26.3-17.27s21.1,6.51,20.96,19.33c0,.52-.04,2.09-.04,2.09,0,0-17.76,2.88-25.08,4.43-31.23,6.6-44.31,18.52-44.31,38.02,0,10.39,5.77,21.64,16.3,27.95,6.32,3.78,14.57,5.21,23.68,5.21,5.99,0,11.81-.89,17.2-2.53,12.25-4.07,15.67-12.07,15.67-12.07v10.46h20.29v-74.78c0-25.42-16.7-40.6-44.67-40.6ZM777.46,164.85c0,7.86-8.56,18.93-28.5,18.93-5.63,0-9.62-1.49-12.28-3.71-3.56-2.97-4.73-7.24-4.24-11.01.21-1.64,1.2-5.17,4.87-8.23,3.75-3.13,10.38-5.37,20.62-7.6,8.42-1.83,19.54-3.85,19.54-3.85v15.48Z"/>
|
||||
<path class="cls-1" d="M876.11,85.04c-2.82,0-5.57.2-8.24.57-18.17,2.73-23.49,11.96-23.49,11.96l.02-9.31h-22.74s0,112.19,0,112.19h23.71v-62.18c0-21.13,15.41-30.75,29.73-30.75,15.48,0,23,8.32,23,25.45v67.48h23.71v-70.74c0-27.56-17.51-44.67-45.69-44.67Z"/>
|
||||
<path class="cls-1" d="M1539.12,85.04c-2.82,0-5.57.2-8.24.57-18.17,2.73-23.49,11.96-23.49,11.96v-9.32h-22.72v112.2h23.71v-62.18c0-21.13,15.41-30.75,29.73-30.75,15.48,0,23,8.32,23,25.45v67.48h23.71v-70.74c0-27.56-17.51-44.67-45.69-44.67Z"/>
|
||||
<path class="cls-1" d="M1020.76,88.26v11.55s-5.81-14.77-32.24-14.77c-32.84,0-53.24,22.66-53.24,59.15,0,20.59,6.58,36.8,18.19,47.04,9.03,7.96,21.09,12.04,35.45,12.32,9.99.19,16.46-2.53,20.5-5.1,7.76-4.94,10.64-9.63,10.64-9.63,0,0-.33,3.67-.93,8.64-.43,3.6-1.24,6.13-1.24,6.13h0c-3.61,12.85-14.17,20.28-29.57,20.28s-24.73-5.07-26.58-15.06l-23.05,6.88c3.98,19.2,22,30.66,48.2,30.66,17.81,0,31.77-4.84,41.5-14.4,9.81-9.64,14.79-23.53,14.79-41.29v-102.41h-22.42ZM1019.26,145.21c0,22.44-10.96,35.84-29.32,35.84-19.67,0-30.95-13.44-30.95-36.86s11.28-36.66,30.95-36.66c17.92,0,29.15,13.34,29.32,34.82v2.86Z"/>
|
||||
<path class="cls-1" d="M1259.01,85.04c-2.6,0-5.13.17-7.59.49-17.88,2.79-23.14,11.9-23.14,11.9v-2.67h-.01s0-45.69,0-45.69h-23.71v151.39h23.71v-62.18c0-21.27,15.41-30.95,29.73-30.95,15.48,0,23,8.32,23,25.45v67.68h23.71v-70.94c0-27.01-17.94-44.47-45.69-44.47Z"/>
|
||||
<circle class="cls-1" cx="1450.93" cy="64.47" r="15.37"/>
|
||||
<path class="cls-1" d="M1439.14,88.2v56.94h0c-6.75-5.56-14.6-9.75-23.5-12.26v-7.23c0-25.42-16.7-40.6-44.67-40.6-22.76,0-39.78,10.67-46.69,29.27-.44,1.19-1.77,4.78-1.77,4.78l19.51,12.62,2.65-6.91c4.52-11.78,12.88-17.27,26.3-17.27s21.1,6.51,20.96,19.33c0,.08,0,1.15,0,2.86-10.04-.28-19.38.69-27.77,2.66,0,0,0,0,0,0-11.06,2.5-31.6,8.85-38.94,25.36-.05.11-1.13,2.96-1.13,2.96-1.06,3.28-1.59,6.84-1.59,10.7,0,10.39,5.77,21.64,16.3,27.95,6.32,3.78,14.57,5.21,23.68,5.21,5.88,0,11.6-.86,16.91-2.44,12.49-4.04,15.96-12.16,15.96-12.16v10.47h20.29v-34.27c-5.7-3.56-14.26-5.66-23.65-5.64,0,2.65,0,4.33,0,4.33,0,7.86-8.56,18.93-28.5,18.93-5.63,0-9.62-1.49-12.28-3.71-3.56-2.97-4.73-7.24-4.24-11.01.21-1.64,1.2-5.17,4.87-8.23l-.04-.11c8.42-6.89,24.97-9.64,40.17-9.04v.03c12.94.47,22.62,3.01,29.53,7.77,1.88,1.19,3.65,2.52,5.28,3.98,6.94,6.23,9.73,13.9,10.93,18.38,1.95,7.31,1.43,18.57,1.43,18.57h23.59v-112.2h-23.59Z"/>
|
||||
</g>
|
||||
<path id="LangChain-symbol" class="cls-1" d="M393.52,75.2c9.66,9.66,9.66,25.38,0,35.04l-21.64,21.29-.22-1.22c-1.58-8.75-5.74-16.69-12.02-22.97-4.73-4.72-10.32-8.21-16.62-10.37-3.91,3.93-6.06,9.08-6.06,14.5,0,1.1.1,2.24.3,3.38,3.47,1.25,6.54,3.18,9.12,5.76,9.66,9.66,9.66,25.38,0,35.04l-18.84,18.84c-4.83,4.83-11.17,7.24-17.52,7.24s-12.69-2.41-17.52-7.24c-9.66-9.66-9.66-25.38,0-35.04l21.64-21.28.22,1.22c1.57,8.73,5.73,16.67,12.03,22.96,4.74,4.74,9.99,7.89,16.28,10.04l1.16-1.16c3.52-3.52,5.45-8.2,5.45-13.19,0-1.11-.1-2.22-.29-3.31-3.63-1.2-6.62-2.91-9.34-5.63-3.92-3.92-6.36-8.93-7.04-14.48-.05-.4-.08-.79-.12-1.19-.54-7.23,2.07-14.29,7.16-19.37l18.84-18.84c4.67-4.67,10.89-7.25,17.52-7.25s12.85,2.57,17.52,7.25ZM491.9,125c0,68.93-56.08,125-125,125H125C56.08,250,0,193.93,0,125S56.08,0,125,0h241.9C435.82,0,491.9,56.08,491.9,125ZM240.9,187.69c1.97-2.39-7.13-9.12-8.99-11.59-3.78-4.1-3.8-10-6.35-14.79-6.24-14.46-13.41-28.81-23.44-41.05-10.6-13.39-23.68-24.47-35.17-37.04-8.53-8.77-10.81-21.26-18.34-30.69-10.38-15.33-43.2-19.51-48.01,2.14.02.68-.19,1.11-.78,1.54-2.66,1.93-5.03,4.14-7.02,6.81-4.87,6.78-5.62,18.28.46,24.37.2-3.21.31-6.24,2.85-8.54,4.7,4.03,11.8,5.46,17.25,2.45,12.04,17.19,9.04,40.97,18.6,59.49,2.64,4.38,5.3,8.85,8.69,12.69,2.75,4.28,12.25,9.33,12.81,13.29.1,6.8-.7,14.23,3.76,19.92,2.1,4.26-3.06,8.54-7.22,8.01-5.4.74-11.99-3.63-16.72-.94-1.67,1.81-4.94-.19-6.38,2.32-.5,1.3-3.2,3.13-1.59,4.38,1.79-1.36,3.45-2.78,5.86-1.97-.36,1.96,1.19,2.24,2.42,2.81-.04,1.33-.82,2.69.2,3.82,1.19-1.2,1.9-2.9,3.79-3.4,6.28,8.37,12.67-8.47,26.26-.89-2.76-.14-5.21.21-7.07,2.48-.46.51-.85,1.11-.04,1.77,7.33-4.73,7.29,1.62,12.05-.33,3.66-1.91,7.3-4.3,11.65-3.62-4.23,1.22-4.4,4.62-6.88,7.49-.42.44-.62.94-.13,1.67,8.78-.74,9.5-3.66,16.59-7.24,5.29-3.23,10.56,4.6,15.14.14,1.01-.97,2.39-.64,3.64-.77-1.6-8.53-19.19,1.56-18.91-9.88,5.66-3.85,4.36-11.22,4.74-17.17,6.51,3.61,13.75,5.71,20.13,9.16,3.22,5.2,8.27,12.07,15,11.62.18-.52.34-.98.53-1.51,2.04.35,4.66,1.7,5.78-.88,3.05,3.19,7.53,3.03,11.5
2,2.21,2.95-2.4-5.55-5.82-6.69-8.29ZM419.51,92.72c0-11.64-4.52-22.57-12.73-30.78-8.21-8.21-19.14-12.73-30.79-12.73s-22.58,4.52-30.79,12.73l-18.84,18.84c-4.4,4.4-7.74,9.57-9.93,15.36l-.13.33-.34.1c-6.84,2.11-12.87,5.73-17.92,10.78l-18.84,18.84c-16.97,16.98-16.97,44.6,0,61.57,8.21,8.21,19.14,12.73,30.78,12.73h0c11.64,0,22.58-4.52,30.79-12.73l18.84-18.84c4.38-4.38,7.7-9.53,9.89-15.31l.13-.33.34-.11c6.72-2.06,12.92-5.8,17.95-10.82l18.84-18.84c8.21-8.21,12.73-19.14,12.73-30.79ZM172.38,173.6c-1.62,6.32-2.15,17.09-10.37,17.4-.68,3.65,2.53,5.02,5.44,3.85,2.89-1.33,4.26,1.05,5.23,3.42,4.46.65,11.06-1.49,11.31-6.77-6.66-3.84-8.72-11.14-11.62-17.9Z"/>
|
||||
</g>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 6.4 KiB |
@@ -1 +1,2 @@
|
||||
httpx
|
||||
httpx
|
||||
grpcio
|
||||
|
||||
@@ -48,4 +48,5 @@ _e2e_test:
|
||||
poetry run pip install -e ../../../standard-tests && \
|
||||
make format lint tests && \
|
||||
poetry install --with test_integration && \
|
||||
poetry run pip install -e ../../../core && \
|
||||
make integration_test
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user