Mirror of https://github.com/hwchase17/langchain.git, synced 2026-02-06 01:00:22 +00:00.

Compare commits: langchain- ... rlm/test-l (170 commits)
@@ -10,7 +10,7 @@ You can use the dev container configuration in this folder to build and run the

You may use the button above, or follow these steps to open this repo in a Codespace:

1. Click the **Code** drop-down menu at the top of https://github.com/langchain-ai/langchain.
1. Click on the **Codespaces** tab.
1. Click **Create codespace on master** .
1. Click **Create codespace on master**.

For more info, check out the [GitHub documentation](https://docs.github.com/en/free-pro-team@latest/github/developing-online-with-codespaces/creating-a-codespace#creating-a-codespace).
.github/workflows/check_diffs.yml (vendored, 4 lines changed)

@@ -123,7 +123,9 @@ jobs:
        shell: bash
        run: |
          echo "Running extended tests, installing dependencies with poetry..."
          poetry install -E extended_testing --with test
          poetry install --with test
          poetry run pip install uv
          poetry run uv pip install -r extended_testing_deps.txt

      - name: Run extended tests
        run: make extended_tests
.github/workflows/scheduled_test.yml (vendored, 75 lines changed)

@@ -10,6 +10,7 @@ env:
jobs:
  build:
    name: Python ${{ matrix.python-version }} - ${{ matrix.working-directory }}
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false

@@ -25,16 +26,52 @@ jobs:
          - "libs/partners/groq"
          - "libs/partners/mistralai"
          - "libs/partners/together"
    name: Python ${{ matrix.python-version }} - ${{ matrix.working-directory }}
          - "libs/partners/cohere"
          - "libs/partners/google-vertexai"
          - "libs/partners/google-genai"
          - "libs/partners/aws"
          - "libs/partners/nvidia-ai-endpoints"

    steps:
      - uses: actions/checkout@v4
        with:
          path: langchain
      - uses: actions/checkout@v4
        with:
          repository: langchain-ai/langchain-google
          path: langchain-google
      - uses: actions/checkout@v4
        with:
          repository: langchain-ai/langchain-nvidia
          path: langchain-nvidia
      - uses: actions/checkout@v4
        with:
          repository: langchain-ai/langchain-cohere
          path: langchain-cohere
      - uses: actions/checkout@v4
        with:
          repository: langchain-ai/langchain-aws
          path: langchain-aws

      - name: Move libs
        run: |
          rm -rf \
            langchain/libs/partners/google-genai \
            langchain/libs/partners/google-vertexai \
            langchain/libs/partners/nvidia-ai-endpoints \
            langchain/libs/partners/cohere
          mv langchain-google/libs/genai langchain/libs/partners/google-genai
          mv langchain-google/libs/vertexai langchain/libs/partners/google-vertexai
          mv langchain-nvidia/libs/ai-endpoints langchain/libs/partners/nvidia-ai-endpoints
          mv langchain-cohere/libs/cohere langchain/libs/partners/cohere
          mv langchain-aws/libs/aws langchain/libs/partners/aws

      - name: Set up Python ${{ matrix.python-version }}
        uses: "./.github/actions/poetry_setup"
        uses: "./langchain/.github/actions/poetry_setup"
        with:
          python-version: ${{ matrix.python-version }}
          poetry-version: ${{ env.POETRY_VERSION }}
          working-directory: ${{ matrix.working-directory }}
          working-directory: langchain/${{ matrix.working-directory }}
          cache-key: scheduled

      - name: 'Authenticate to Google Cloud'

@@ -43,16 +80,20 @@ jobs:
        with:
          credentials_json: '${{ secrets.GOOGLE_CREDENTIALS }}'

      - name: Configure AWS Credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ secrets.AWS_REGION }}

      - name: Install dependencies
        working-directory: ${{ matrix.working-directory }}
        shell: bash
        run: |
          echo "Running scheduled tests, installing dependencies with poetry..."
          cd langchain/${{ matrix.working-directory }}
          poetry install --with=test_integration,test

      - name: Run integration tests
        working-directory: ${{ matrix.working-directory }}
        shell: bash
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}

@@ -67,12 +108,26 @@ jobs:
          GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
          MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
          TOGETHER_API_KEY: ${{ secrets.TOGETHER_API_KEY }}
          COHERE_API_KEY: ${{ secrets.COHERE_API_KEY }}
          NVIDIA_API_KEY: ${{ secrets.NVIDIA_API_KEY }}
          GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
          GOOGLE_SEARCH_API_KEY: ${{ secrets.GOOGLE_SEARCH_API_KEY }}
          GOOGLE_CSE_ID: ${{ secrets.GOOGLE_CSE_ID }}
        run: |
          make integration_test
          cd langchain/${{ matrix.working-directory }}
          make integration_tests

      - name: Remove external libraries
        run: |
          rm -rf \
            langchain/libs/partners/google-genai \
            langchain/libs/partners/google-vertexai \
            langchain/libs/partners/nvidia-ai-endpoints \
            langchain/libs/partners/cohere \
            langchain/libs/partners/aws

      - name: Ensure the tests did not create any additional files
        working-directory: ${{ matrix.working-directory }}
        shell: bash
        working-directory: langchain
        run: |
          set -eu
.gitignore (vendored, 1 line changed)

@@ -133,6 +133,7 @@ env.bak/

# mypy
.mypy_cache/
.mypy_cache_test/
.dmypy.json
dmypy.json
README.md (75 lines changed)
@@ -2,17 +2,17 @@
|
||||
|
||||
⚡ Build context-aware reasoning applications ⚡
|
||||
|
||||
[](https://github.com/langchain-ai/langchain/releases)
|
||||
[](https://github.com/langchain-ai/langchain/releases)
|
||||
[](https://github.com/langchain-ai/langchain/actions/workflows/check_diffs.yml)
|
||||
[](https://pepy.tech/project/langchain-core)
|
||||
[](https://opensource.org/licenses/MIT)
|
||||
[](https://twitter.com/langchainai)
|
||||
[](https://discord.gg/6adMQxSpJS)
|
||||
[](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchain)
|
||||
[](https://opensource.org/licenses/MIT)
|
||||
[](https://pypistats.org/packages/langchain-core)
|
||||
[](https://star-history.com/#langchain-ai/langchain)
|
||||
[](https://libraries.io/github/langchain-ai/langchain)
|
||||
[](https://github.com/langchain-ai/langchain/issues)
|
||||
[](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchain)
|
||||
[](https://codespaces.new/langchain-ai/langchain)
|
||||
[](https://star-history.com/#langchain-ai/langchain)
|
||||
[](https://libraries.io/github/langchain-ai/langchain)
|
||||
[](https://github.com/langchain-ai/langchain/issues)
|
||||
[](https://discord.gg/6adMQxSpJS)
|
||||
[](https://twitter.com/langchainai)
|
||||
|
||||
Looking for the JS/TS library? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).
|
||||
|
||||
@@ -38,22 +38,22 @@ conda install langchain -c conda-forge
|
||||
|
||||
For these applications, LangChain simplifies the entire application lifecycle:
|
||||
|
||||
- **Open-source libraries**: Build your applications using LangChain's [modular building blocks](https://python.langchain.com/docs/expression_language/) and [components](https://python.langchain.com/docs/modules/). Integrate with hundreds of [third-party providers](https://python.langchain.com/docs/integrations/platforms/).
|
||||
- **Productionization**: Inspect, monitor, and evaluate your apps with [LangSmith](https://python.langchain.com/docs/langsmith/) so that you can constantly optimize and deploy with confidence.
|
||||
- **Deployment**: Turn any chain into a REST API with [LangServe](https://python.langchain.com/docs/langserve).
|
||||
- **Open-source libraries**: Build your applications using LangChain's [modular building blocks](https://python.langchain.com/v0.2/docs/concepts/#langchain-expression-language-lcel) and [components](https://python.langchain.com/v0.2/docs/concepts/#components). Integrate with hundreds of [third-party providers](https://python.langchain.com/v0.2/docs/integrations/platforms/).
|
||||
- **Productionization**: Inspect, monitor, and evaluate your apps with [LangSmith](https://docs.smith.langchain.com/) so that you can constantly optimize and deploy with confidence.
|
||||
- **Deployment**: Turn any chain into a REST API with [LangServe](https://python.langchain.com/v0.2/docs/langserve/).
|
||||
|
||||
### Open-source libraries
|
||||
- **`langchain-core`**: Base abstractions and LangChain Expression Language.
|
||||
- **`langchain-community`**: Third party integrations.
|
||||
- Some integrations have been further split into **partner packages** that only rely on **`langchain-core`**. Examples include **`langchain_openai`** and **`langchain_anthropic`**.
|
||||
- **`langchain`**: Chains, agents, and retrieval strategies that make up an application's cognitive architecture.
|
||||
- **[`LangGraph`](https://python.langchain.com/docs/langgraph)**: A library for building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
|
||||
- **[`LangGraph`](https://langchain-ai.github.io/langgraph/)**: A library for building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
|
||||
|
||||
### Productionization:
|
||||
- **[LangSmith](https://python.langchain.com/docs/langsmith)**: A developer platform that lets you debug, test, evaluate, and monitor chains built on any LLM framework and seamlessly integrates with LangChain.
|
||||
- **[LangSmith](https://docs.smith.langchain.com/)**: A developer platform that lets you debug, test, evaluate, and monitor chains built on any LLM framework and seamlessly integrates with LangChain.
|
||||
|
||||
### Deployment:
|
||||
- **[LangServe](https://python.langchain.com/docs/langserve)**: A library for deploying LangChain chains as REST APIs.
|
||||
- **[LangServe](https://python.langchain.com/v0.2/docs/langserve/)**: A library for deploying LangChain chains as REST APIs.
|
||||
|
||||

|
||||
|
||||
@@ -61,20 +61,20 @@ For these applications, LangChain simplifies the entire application lifecycle:
|
||||
|
||||
**❓ Question answering with RAG**
|
||||
|
||||
- [Documentation](https://python.langchain.com/docs/use_cases/question_answering/)
|
||||
- [Documentation](https://python.langchain.com/v0.2/docs/tutorials/rag/)
|
||||
- End-to-end Example: [Chat LangChain](https://chat.langchain.com) and [repo](https://github.com/langchain-ai/chat-langchain)
|
||||
|
||||
**🧱 Extracting structured output**
|
||||
|
||||
- [Documentation](https://python.langchain.com/docs/use_cases/extraction/)
|
||||
- [Documentation](https://python.langchain.com/v0.2/docs/tutorials/extraction/)
|
||||
- End-to-end Example: [SQL Llama2 Template](https://github.com/langchain-ai/langchain-extract/)
|
||||
|
||||
**🤖 Chatbots**
|
||||
|
||||
- [Documentation](https://python.langchain.com/docs/use_cases/chatbots)
|
||||
- [Documentation](https://python.langchain.com/v0.2/docs/tutorials/chatbot/)
|
||||
- End-to-end Example: [Web LangChain (web researcher chatbot)](https://weblangchain.vercel.app) and [repo](https://github.com/langchain-ai/weblangchain)
|
||||
|
||||
And much more! Head to the [Use cases](https://python.langchain.com/docs/use_cases/) section of the docs for more.
|
||||
And much more! Head to the [Tutorials](https://python.langchain.com/v0.2/docs/tutorials/) section of the docs for more.
|
||||
|
||||
## 🚀 How does LangChain help?
|
||||
The main value props of the LangChain libraries are:
|
||||
@@ -87,49 +87,50 @@ Off-the-shelf chains make it easy to get started. Components make it easy to cus
|
||||
|
||||
LCEL is the foundation of many of LangChain's components, and is a declarative way to compose chains. LCEL was designed from day 1 to support putting prototypes in production, with no code changes, from the simplest “prompt + LLM” chain to the most complex chains.
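To make the declarative composition concrete, here is a minimal LCEL sketch (an illustrative example rather than part of this diff; it assumes `langchain-openai` is installed, `OPENAI_API_KEY` is set, and the model name is just a placeholder):

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

# Each stage is a Runnable; the | operator composes them into a single chain.
prompt = ChatPromptTemplate.from_template("Tell me a short joke about {topic}")
llm = ChatOpenAI(model="gpt-4o-mini")  # placeholder model; any chat model works
chain = prompt | llm | StrOutputParser()

print(chain.invoke({"topic": "vector databases"}))
```

Because every stage is a Runnable, the same chain also supports `.stream()` and `.batch()` without code changes, which is what makes the prototype-to-production claim above work.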
|
||||
|
||||
- **[Overview](https://python.langchain.com/docs/expression_language/)**: LCEL and its benefits
|
||||
- **[Interface](https://python.langchain.com/docs/expression_language/interface)**: The standard interface for LCEL objects
|
||||
- **[Primitives](https://python.langchain.com/docs/expression_language/primitives)**: More on the primitives LCEL includes
|
||||
- **[Overview](https://python.langchain.com/v0.2/docs/concepts/#langchain-expression-language-lcel)**: LCEL and its benefits
|
||||
- **[Interface](https://python.langchain.com/v0.2/docs/concepts/#runnable-interface)**: The standard Runnable interface for LCEL objects
|
||||
- **[Primitives](https://python.langchain.com/v0.2/docs/how_to/#langchain-expression-language-lcel)**: More on the primitives LCEL includes
|
||||
- **[Cheatsheet](https://python.langchain.com/v0.2/docs/how_to/lcel_cheatsheet/)**: Quick overview of the most common usage patterns
|
||||
|
||||
## Components
|
||||
|
||||
Components fall into the following **modules**:
|
||||
|
||||
**📃 Model I/O:**
|
||||
**📃 Model I/O**
|
||||
|
||||
This includes [prompt management](https://python.langchain.com/docs/modules/model_io/prompts/), [prompt optimization](https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/), a generic interface for [chat models](https://python.langchain.com/docs/modules/model_io/chat/) and [LLMs](https://python.langchain.com/docs/modules/model_io/llms/), and common utilities for working with [model outputs](https://python.langchain.com/docs/modules/model_io/output_parsers/).
|
||||
This includes [prompt management](https://python.langchain.com/v0.2/docs/concepts/#prompt-templates), [prompt optimization](https://python.langchain.com/v0.2/docs/concepts/#example-selectors), a generic interface for [chat models](https://python.langchain.com/v0.2/docs/concepts/#chat-models) and [LLMs](https://python.langchain.com/v0.2/docs/concepts/#llms), and common utilities for working with [model outputs](https://python.langchain.com/v0.2/docs/concepts/#output-parsers).
|
||||
|
||||
**📚 Retrieval:**
|
||||
**📚 Retrieval**
|
||||
|
||||
Retrieval Augmented Generation involves [loading data](https://python.langchain.com/docs/modules/data_connection/document_loaders/) from a variety of sources, [preparing it](https://python.langchain.com/docs/modules/data_connection/document_loaders/), [then retrieving it](https://python.langchain.com/docs/modules/data_connection/retrievers/) for use in the generation step.
|
||||
Retrieval Augmented Generation involves [loading data](https://python.langchain.com/v0.2/docs/concepts/#document-loaders) from a variety of sources, [preparing it](https://python.langchain.com/v0.2/docs/concepts/#text-splitters), then [searching over (a.k.a. retrieving from)](https://python.langchain.com/v0.2/docs/concepts/#retrievers) it for use in the generation step.
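As a minimal sketch of that load, prepare, retrieve flow (illustrative only, not part of this diff; it assumes `langchain-community`, `chromadb`, and `langchain-openai` are installed, and `my_notes.txt` is a placeholder path):

```python
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter

# Load: any document loader works here; the path is a placeholder.
docs = TextLoader("my_notes.txt").load()

# Prepare: split long documents into overlapping chunks.
splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
chunks = splitter.split_documents(docs)

# Index and retrieve: embed the chunks, then search over them.
vectorstore = Chroma.from_documents(chunks, embedding=OpenAIEmbeddings())
retriever = vectorstore.as_retriever()
print(retriever.invoke("What did I write about retrieval?"))
```

Swapping the loader, splitter, embedding model, or vector store changes only the corresponding line.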
|
||||
|
||||
**🤖 Agents:**
|
||||
**🤖 Agents**
|
||||
|
||||
Agents allow an LLM autonomy over how a task is accomplished. Agents make decisions about which Actions to take, then take that Action, observe the result, and repeat until the task is complete done. LangChain provides a [standard interface for agents](https://python.langchain.com/docs/modules/agents/), a [selection of agents](https://python.langchain.com/docs/modules/agents/agent_types/) to choose from, and examples of end-to-end agents.
|
||||
Agents allow an LLM autonomy over how a task is accomplished. Agents make decisions about which Actions to take, then take that Action, observe the result, and repeat until the task is complete. LangChain provides a [standard interface for agents](https://python.langchain.com/v0.2/docs/concepts/#agents) along with the [LangGraph](https://github.com/langchain-ai/langgraph) extension for building custom agents.
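A minimal sketch of that decide, act, observe, repeat loop using tool calling (illustrative only, not part of this diff; the `multiply` tool and model name are placeholders, and `langchain-openai` is assumed to be installed):

```python
from langchain_core.messages import HumanMessage, ToolMessage
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI


@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b


llm = ChatOpenAI(model="gpt-4o-mini").bind_tools([multiply])
messages = [HumanMessage("What is 6 times 7?")]

while True:
    ai_msg = llm.invoke(messages)  # decide which action (tool call) to take, if any
    messages.append(ai_msg)
    if not ai_msg.tool_calls:
        break  # the model answered directly; the task is complete
    for call in ai_msg.tool_calls:
        result = multiply.invoke(call["args"])  # act (only one tool here)
        messages.append(ToolMessage(content=str(result), tool_call_id=call["id"]))  # observe

print(messages[-1].content)
```

For anything beyond a toy loop, LangGraph (linked above) is the recommended way to build custom agents.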
|
||||
|
||||
## 📖 Documentation
|
||||
|
||||
Please see [here](https://python.langchain.com) for full documentation, which includes:
|
||||
|
||||
- [Getting started](https://python.langchain.com/docs/get_started/introduction): installation, setting up the environment, simple examples
|
||||
- [Use case](https://python.langchain.com/docs/use_cases/) walkthroughs and best practice [guides](https://python.langchain.com/docs/guides/)
|
||||
- Overviews of the [interfaces](https://python.langchain.com/docs/expression_language/), [components](https://python.langchain.com/docs/modules/), and [integrations](https://python.langchain.com/docs/integrations/providers)
|
||||
|
||||
You can also check out the full [API Reference docs](https://api.python.langchain.com).
|
||||
- [Introduction](https://python.langchain.com/v0.2/docs/introduction/): Overview of the framework and the structure of the docs.
|
||||
- [Tutorials](https://python.langchain.com/docs/use_cases/): If you're looking to build something specific or are more of a hands-on learner, check out our tutorials. This is the best place to get started.
|
||||
- [How-to guides](https://python.langchain.com/v0.2/docs/how_to/): Answers to “How do I….?” type questions. These guides are goal-oriented and concrete; they're meant to help you complete a specific task.
|
||||
- [Conceptual guide](https://python.langchain.com/v0.2/docs/concepts/): Conceptual explanations of the key parts of the framework.
|
||||
- [API Reference](https://api.python.langchain.com): Thorough documentation of every class and method.
|
||||
|
||||
## 🌐 Ecosystem
|
||||
|
||||
- [🦜🛠️ LangSmith](https://python.langchain.com/docs/langsmith/): Tracing and evaluating your language model applications and intelligent agents to help you move from prototype to production.
|
||||
- [🦜🕸️ LangGraph](https://python.langchain.com/docs/langgraph): Creating stateful, multi-actor applications with LLMs, built on top of (and intended to be used with) LangChain primitives.
|
||||
- [🦜🛠️ LangSmith](https://docs.smith.langchain.com/): Tracing and evaluating your language model applications and intelligent agents to help you move from prototype to production.
|
||||
- [🦜🕸️ LangGraph](https://langchain-ai.github.io/langgraph/): Creating stateful, multi-actor applications with LLMs, built on top of (and intended to be used with) LangChain primitives.
|
||||
- [🦜🏓 LangServe](https://python.langchain.com/docs/langserve): Deploying LangChain runnables and chains as REST APIs.
|
||||
- [LangChain Templates](https://python.langchain.com/docs/templates/): Example applications hosted with LangServe.
|
||||
- [LangChain Templates](https://python.langchain.com/v0.2/docs/templates/): Example applications hosted with LangServe.
|
||||
|
||||
|
||||
## 💁 Contributing
|
||||
|
||||
As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.
|
||||
|
||||
For detailed information on how to contribute, see [here](https://python.langchain.com/docs/contributing/).
|
||||
For detailed information on how to contribute, see [here](https://python.langchain.com/v0.2/docs/contributing/).
|
||||
|
||||
## 🌟 Contributors
|
||||
|
||||
@@ -46,7 +46,7 @@
"from langchain_experimental.autonomous_agents import AutoGPT\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"# Needed synce jupyter runs an async eventloop\n",
"# Needed since jupyter runs an async eventloop\n",
"nest_asyncio.apply()"
]
},
cookbook/nomic_multimodal_rag.ipynb (new file, 497 lines)
@@ -0,0 +1,497 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "9fc3897d-176f-4729-8fd1-cfb4add53abd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Nomic multi-modal RAG\n",
|
||||
"\n",
|
||||
"Many documents contain a mixture of content types, including text and images. \n",
|
||||
"\n",
|
||||
"Yet, information captured in images is lost in most RAG applications.\n",
|
||||
"\n",
|
||||
"With the emergence of multimodal LLMs, like [GPT-4V](https://openai.com/research/gpt-4v-system-card), it is worth considering how to utilize images in RAG:\n",
|
||||
"\n",
|
||||
"In this demo we\n",
|
||||
"\n",
|
||||
"* Use multimodal embeddings from Nomic Embed [Vision](https://huggingface.co/nomic-ai/nomic-embed-vision-v1.5) and [Text](https://huggingface.co/nomic-ai/nomic-embed-text-v1.5) to embed images and text\n",
|
||||
"* Retrieve both using similarity search\n",
|
||||
"* Pass raw images and text chunks to a multimodal LLM for answer synthesis \n",
|
||||
"\n",
|
||||
"## Signup\n",
|
||||
"\n",
|
||||
"Get your API token, then run:\n",
|
||||
"```\n",
|
||||
"! nomic login\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Then run with your generated API token \n",
|
||||
"```\n",
|
||||
"! nomic login < token > \n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"## Packages\n",
|
||||
"\n",
|
||||
"For `unstructured`, you will also need `poppler` ([installation instructions](https://pdf2image.readthedocs.io/en/latest/installation.html)) and `tesseract` ([installation instructions](https://tesseract-ocr.github.io/tessdoc/Installation.html)) in your system."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "54926b9b-75c2-4cd4-8f14-b3882a0d370b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! nomic login token"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "febbc459-ebba-4c1a-a52b-fed7731593f8",
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install -U langchain-nomic langchain_community tiktoken langchain-openai chromadb langchain # (newest versions required for multi-modal)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "acbdc603-39e2-4a5f-836c-2bbaecd46b0b",
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# lock to 0.10.19 due to a persistent bug in more recent versions\n",
|
||||
"! pip install \"unstructured[all-docs]==0.10.19\" pillow pydantic lxml pillow matplotlib tiktoken"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1e94b3fb-8e3e-4736-be0a-ad881626c7bd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Data Loading\n",
|
||||
"\n",
|
||||
"### Partition PDF text and images\n",
|
||||
" \n",
|
||||
"Let's look at an example pdfs containing interesting images.\n",
|
||||
"\n",
|
||||
"1/ Art from the J Paul Getty museum:\n",
|
||||
"\n",
|
||||
" * Here is a [zip file](https://drive.google.com/file/d/18kRKbq2dqAhhJ3DfZRnYcTBEUfYxe1YR/view?usp=sharing) with the PDF and the already extracted images. \n",
|
||||
"* https://www.getty.edu/publications/resources/virtuallibrary/0892360224.pdf\n",
|
||||
"\n",
|
||||
"2/ Famous photographs from library of congress:\n",
|
||||
"\n",
|
||||
"* https://www.loc.gov/lcm/pdf/LCM_2020_1112.pdf\n",
|
||||
"* We'll use this as an example below\n",
|
||||
"\n",
|
||||
"We can use `partition_pdf` below from [Unstructured](https://unstructured-io.github.io/unstructured/introduction.html#key-concepts) to extract text and images.\n",
|
||||
"\n",
|
||||
"To supply this to extract the images:\n",
|
||||
"```\n",
|
||||
"extract_images_in_pdf=True\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"If using this zip file, then you can simply process the text only with:\n",
|
||||
"```\n",
|
||||
"extract_images_in_pdf=False\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9646b524-71a7-4b2a-bdc8-0b81f77e968f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Folder with pdf and extracted images\n",
|
||||
"from pathlib import Path\n",
|
||||
"\n",
|
||||
"# replace with actual path to images\n",
|
||||
"path = Path(\"../art\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "77f096ab-a933-41d0-8f4e-1efc83998fc3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"path.resolve()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "bc4839c0-8773-4a07-ba59-5364501269b2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Extract images, tables, and chunk text\n",
|
||||
"from unstructured.partition.pdf import partition_pdf\n",
|
||||
"\n",
|
||||
"raw_pdf_elements = partition_pdf(\n",
|
||||
" filename=str(path.resolve()) + \"/getty.pdf\",\n",
|
||||
" extract_images_in_pdf=False,\n",
|
||||
" infer_table_structure=True,\n",
|
||||
" chunking_strategy=\"by_title\",\n",
|
||||
" max_characters=4000,\n",
|
||||
" new_after_n_chars=3800,\n",
|
||||
" combine_text_under_n_chars=2000,\n",
|
||||
" image_output_dir_path=path,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "969545ad",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Categorize text elements by type\n",
|
||||
"tables = []\n",
|
||||
"texts = []\n",
|
||||
"for element in raw_pdf_elements:\n",
|
||||
" if \"unstructured.documents.elements.Table\" in str(type(element)):\n",
|
||||
" tables.append(str(element))\n",
|
||||
" elif \"unstructured.documents.elements.CompositeElement\" in str(type(element)):\n",
|
||||
" texts.append(str(element))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5d8e6349-1547-4cbf-9c6f-491d8610ec10",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Multi-modal embeddings with our document\n",
|
||||
"\n",
|
||||
"We will use [nomic-embed-vision-v1.5](https://huggingface.co/nomic-ai/nomic-embed-vision-v1.5) embeddings. This model is aligned \n",
|
||||
"to [nomic-embed-text-v1.5](https://huggingface.co/nomic-ai/nomic-embed-text-v1.5) allowing for multimodal semantic search and Multimodal RAG!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4bc15842-cb95-4f84-9eb5-656b0282a800",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import uuid\n",
|
||||
"\n",
|
||||
"import chromadb\n",
|
||||
"import numpy as np\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_nomic import NomicEmbeddings\n",
|
||||
"from PIL import Image as _PILImage\n",
|
||||
"\n",
|
||||
"# Create chroma\n",
|
||||
"text_vectorstore = Chroma(\n",
|
||||
" collection_name=\"mm_rag_clip_photos_text\",\n",
|
||||
" embedding_function=NomicEmbeddings(\n",
|
||||
" vision_model=\"nomic-embed-vision-v1.5\", model=\"nomic-embed-text-v1.5\"\n",
|
||||
" ),\n",
|
||||
")\n",
|
||||
"image_vectorstore = Chroma(\n",
|
||||
" collection_name=\"mm_rag_clip_photos_image\",\n",
|
||||
" embedding_function=NomicEmbeddings(\n",
|
||||
" vision_model=\"nomic-embed-vision-v1.5\", model=\"nomic-embed-text-v1.5\"\n",
|
||||
" ),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Get image URIs with .jpg extension only\n",
|
||||
"image_uris = sorted(\n",
|
||||
" [\n",
|
||||
" os.path.join(path, image_name)\n",
|
||||
" for image_name in os.listdir(path)\n",
|
||||
" if image_name.endswith(\".jpg\")\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Add images\n",
|
||||
"image_vectorstore.add_images(uris=image_uris)\n",
|
||||
"\n",
|
||||
"# Add documents\n",
|
||||
"text_vectorstore.add_texts(texts=texts)\n",
|
||||
"\n",
|
||||
"# Make retriever\n",
|
||||
"image_retriever = image_vectorstore.as_retriever()\n",
|
||||
"text_retriever = text_vectorstore.as_retriever()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "02a186d0-27e0-4820-8092-63b5349dd25d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## RAG\n",
|
||||
"\n",
|
||||
"`vectorstore.add_images` will store / retrieve images as base64 encoded strings.\n",
|
||||
"\n",
|
||||
"These can be passed to [GPT-4V](https://platform.openai.com/docs/guides/vision)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "344f56a8-0dc3-433e-851c-3f7600c7a72b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import base64\n",
|
||||
"import io\n",
|
||||
"from io import BytesIO\n",
|
||||
"\n",
|
||||
"import numpy as np\n",
|
||||
"from PIL import Image\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def resize_base64_image(base64_string, size=(128, 128)):\n",
|
||||
" \"\"\"\n",
|
||||
" Resize an image encoded as a Base64 string.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" base64_string (str): Base64 string of the original image.\n",
|
||||
" size (tuple): Desired size of the image as (width, height).\n",
|
||||
"\n",
|
||||
" Returns:\n",
|
||||
" str: Base64 string of the resized image.\n",
|
||||
" \"\"\"\n",
|
||||
" # Decode the Base64 string\n",
|
||||
" img_data = base64.b64decode(base64_string)\n",
|
||||
" img = Image.open(io.BytesIO(img_data))\n",
|
||||
"\n",
|
||||
" # Resize the image\n",
|
||||
" resized_img = img.resize(size, Image.LANCZOS)\n",
|
||||
"\n",
|
||||
" # Save the resized image to a bytes buffer\n",
|
||||
" buffered = io.BytesIO()\n",
|
||||
" resized_img.save(buffered, format=img.format)\n",
|
||||
"\n",
|
||||
" # Encode the resized image to Base64\n",
|
||||
" return base64.b64encode(buffered.getvalue()).decode(\"utf-8\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def is_base64(s):\n",
|
||||
" \"\"\"Check if a string is Base64 encoded\"\"\"\n",
|
||||
" try:\n",
|
||||
" return base64.b64encode(base64.b64decode(s)) == s.encode()\n",
|
||||
" except Exception:\n",
|
||||
" return False\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def split_image_text_types(docs):\n",
|
||||
" \"\"\"Split numpy array images and texts\"\"\"\n",
|
||||
" images = []\n",
|
||||
" text = []\n",
|
||||
" for doc in docs:\n",
|
||||
" doc = doc.page_content # Extract Document contents\n",
|
||||
" if is_base64(doc):\n",
|
||||
" # Resize image to avoid OAI server error\n",
|
||||
" images.append(\n",
|
||||
" resize_base64_image(doc, size=(250, 250))\n",
|
||||
" ) # base64 encoded str\n",
|
||||
" else:\n",
|
||||
" text.append(doc)\n",
|
||||
" return {\"images\": images, \"texts\": text}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "23a2c1d8-fea6-4152-b184-3172dd46c735",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Currently, we format the inputs using a `RunnableLambda` while we add image support to `ChatPromptTemplates`.\n",
|
||||
"\n",
|
||||
"Our runnable follows the classic RAG flow - \n",
|
||||
"\n",
|
||||
"* We first compute the context (both \"texts\" and \"images\" in this case) and the question (just a RunnablePassthrough here) \n",
|
||||
"* Then we pass this into our prompt template, which is a custom function that formats the message for the gpt-4-vision-preview model. \n",
|
||||
"* And finally we parse the output as a string."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "5d8919dc-c238-4746-86ba-45d940a7d260",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = \"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4c93fab3-74c4-4f1d-958a-0bc4cdd0797e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from operator import itemgetter\n",
|
||||
"\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def prompt_func(data_dict):\n",
|
||||
" # Joining the context texts into a single string\n",
|
||||
" formatted_texts = \"\\n\".join(data_dict[\"text_context\"][\"texts\"])\n",
|
||||
" messages = []\n",
|
||||
"\n",
|
||||
" # Adding image(s) to the messages if present\n",
|
||||
" if data_dict[\"image_context\"][\"images\"]:\n",
|
||||
" image_message = {\n",
|
||||
" \"type\": \"image_url\",\n",
|
||||
" \"image_url\": {\n",
|
||||
" \"url\": f\"data:image/jpeg;base64,{data_dict['image_context']['images'][0]}\"\n",
|
||||
" },\n",
|
||||
" }\n",
|
||||
" messages.append(image_message)\n",
|
||||
"\n",
|
||||
" # Adding the text message for analysis\n",
|
||||
" text_message = {\n",
|
||||
" \"type\": \"text\",\n",
|
||||
" \"text\": (\n",
|
||||
" \"As an expert art critic and historian, your task is to analyze and interpret images, \"\n",
|
||||
" \"considering their historical and cultural significance. Alongside the images, you will be \"\n",
|
||||
" \"provided with related text to offer context. Both will be retrieved from a vectorstore based \"\n",
|
||||
" \"on user-input keywords. Please use your extensive knowledge and analytical skills to provide a \"\n",
|
||||
" \"comprehensive summary that includes:\\n\"\n",
|
||||
" \"- A detailed description of the visual elements in the image.\\n\"\n",
|
||||
" \"- The historical and cultural context of the image.\\n\"\n",
|
||||
" \"- An interpretation of the image's symbolism and meaning.\\n\"\n",
|
||||
" \"- Connections between the image and the related text.\\n\\n\"\n",
|
||||
" f\"User-provided keywords: {data_dict['question']}\\n\\n\"\n",
|
||||
" \"Text and / or tables:\\n\"\n",
|
||||
" f\"{formatted_texts}\"\n",
|
||||
" ),\n",
|
||||
" }\n",
|
||||
" messages.append(text_message)\n",
|
||||
"\n",
|
||||
" return [HumanMessage(content=messages)]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI(temperature=0, model=\"gpt-4-vision-preview\", max_tokens=1024)\n",
|
||||
"\n",
|
||||
"# RAG pipeline\n",
|
||||
"chain = (\n",
|
||||
" {\n",
|
||||
" \"text_context\": text_retriever | RunnableLambda(split_image_text_types),\n",
|
||||
" \"image_context\": image_retriever | RunnableLambda(split_image_text_types),\n",
|
||||
" \"question\": RunnablePassthrough(),\n",
|
||||
" }\n",
|
||||
" | RunnableLambda(prompt_func)\n",
|
||||
" | model\n",
|
||||
" | StrOutputParser()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1566096d-97c2-4ddc-ba4a-6ef88c525e4e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Test retrieval and run RAG"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "90121e56-674b-473b-871d-6e4753fd0c45",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from IPython.display import HTML, display\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def plt_img_base64(img_base64):\n",
|
||||
" # Create an HTML img tag with the base64 string as the source\n",
|
||||
" image_html = f'<img src=\"data:image/jpeg;base64,{img_base64}\" />'\n",
|
||||
"\n",
|
||||
" # Display the image by rendering the HTML\n",
|
||||
" display(HTML(image_html))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"docs = text_retriever.invoke(\"Women with children\", k=5)\n",
|
||||
"for doc in docs:\n",
|
||||
" if is_base64(doc.page_content):\n",
|
||||
" plt_img_base64(doc.page_content)\n",
|
||||
" else:\n",
|
||||
" print(doc.page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "44eaa532-f035-4c04-b578-02339d42554c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docs = image_retriever.invoke(\"Women with children\", k=5)\n",
|
||||
"for doc in docs:\n",
|
||||
" if is_base64(doc.page_content):\n",
|
||||
" plt_img_base64(doc.page_content)\n",
|
||||
" else:\n",
|
||||
" print(doc.page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "69fb15fd-76fc-49b4-806d-c4db2990027d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain.invoke(\"Women with children\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "227f08b8-e732-4089-b65c-6eb6f9e48f15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can see the images retrieved in the LangSmith trace:\n",
|
||||
"\n",
|
||||
"LangSmith [trace](https://smith.langchain.com/public/69c558a5-49dc-4c60-a49b-3adbb70f74c5/r/e872c2c8-528c-468f-aefd-8b5cd730a673)."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -86,8 +86,7 @@
|
||||
"\n",
|
||||
"import oracledb\n",
|
||||
"\n",
|
||||
"# please update with your username, password, hostname and service_name\n",
|
||||
"# please make sure this user has sufficient privileges to perform all below\n",
|
||||
"# Update with your username, password, hostname, and service_name\n",
|
||||
"username = \"\"\n",
|
||||
"password = \"\"\n",
|
||||
"dsn = \"\"\n",
|
||||
@@ -97,40 +96,45 @@
|
||||
" print(\"Connection successful!\")\n",
|
||||
"\n",
|
||||
" cursor = conn.cursor()\n",
|
||||
" cursor.execute(\n",
|
||||
" \"\"\"\n",
|
||||
" begin\n",
|
||||
" -- drop user\n",
|
||||
" begin\n",
|
||||
" execute immediate 'drop user testuser cascade';\n",
|
||||
" exception\n",
|
||||
" when others then\n",
|
||||
" dbms_output.put_line('Error setting up user.');\n",
|
||||
" end;\n",
|
||||
" execute immediate 'create user testuser identified by testuser';\n",
|
||||
" execute immediate 'grant connect, unlimited tablespace, create credential, create procedure, create any index to testuser';\n",
|
||||
" execute immediate 'create or replace directory DEMO_PY_DIR as ''/scratch/hroy/view_storage/hroy_devstorage/demo/orachain''';\n",
|
||||
" execute immediate 'grant read, write on directory DEMO_PY_DIR to public';\n",
|
||||
" execute immediate 'grant create mining model to testuser';\n",
|
||||
"\n",
|
||||
" -- network access\n",
|
||||
" begin\n",
|
||||
" DBMS_NETWORK_ACL_ADMIN.APPEND_HOST_ACE(\n",
|
||||
" host => '*',\n",
|
||||
" ace => xs$ace_type(privilege_list => xs$name_list('connect'),\n",
|
||||
" principal_name => 'testuser',\n",
|
||||
" principal_type => xs_acl.ptype_db));\n",
|
||||
" end;\n",
|
||||
" end;\n",
|
||||
" \"\"\"\n",
|
||||
" )\n",
|
||||
" print(\"User setup done!\")\n",
|
||||
" cursor.close()\n",
|
||||
" try:\n",
|
||||
" cursor.execute(\n",
|
||||
" \"\"\"\n",
|
||||
" begin\n",
|
||||
" -- Drop user\n",
|
||||
" begin\n",
|
||||
" execute immediate 'drop user testuser cascade';\n",
|
||||
" exception\n",
|
||||
" when others then\n",
|
||||
" dbms_output.put_line('Error dropping user: ' || SQLERRM);\n",
|
||||
" end;\n",
|
||||
" \n",
|
||||
" -- Create user and grant privileges\n",
|
||||
" execute immediate 'create user testuser identified by testuser';\n",
|
||||
" execute immediate 'grant connect, unlimited tablespace, create credential, create procedure, create any index to testuser';\n",
|
||||
" execute immediate 'create or replace directory DEMO_PY_DIR as ''/scratch/hroy/view_storage/hroy_devstorage/demo/orachain''';\n",
|
||||
" execute immediate 'grant read, write on directory DEMO_PY_DIR to public';\n",
|
||||
" execute immediate 'grant create mining model to testuser';\n",
|
||||
" \n",
|
||||
" -- Network access\n",
|
||||
" begin\n",
|
||||
" DBMS_NETWORK_ACL_ADMIN.APPEND_HOST_ACE(\n",
|
||||
" host => '*',\n",
|
||||
" ace => xs$ace_type(privilege_list => xs$name_list('connect'),\n",
|
||||
" principal_name => 'testuser',\n",
|
||||
" principal_type => xs_acl.ptype_db)\n",
|
||||
" );\n",
|
||||
" end;\n",
|
||||
" end;\n",
|
||||
" \"\"\"\n",
|
||||
" )\n",
|
||||
" print(\"User setup done!\")\n",
|
||||
" except Exception as e:\n",
|
||||
" print(f\"User setup failed with error: {e}\")\n",
|
||||
" finally:\n",
|
||||
" cursor.close()\n",
|
||||
" conn.close()\n",
|
||||
"except Exception as e:\n",
|
||||
" print(\"User setup failed!\")\n",
|
||||
" cursor.close()\n",
|
||||
" conn.close()\n",
|
||||
" print(f\"Connection failed with error: {e}\")\n",
|
||||
" sys.exit(1)"
|
||||
]
|
||||
},
|
||||
@@ -526,8 +530,6 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"***Note:*** Currently, OracleEmbeddings processes each embedding generation request individually, without batching, by calling REST endpoints separately for each request. This method could potentially lead to exceeding the maximum request per minute quota set by some providers. However, we are actively working to enhance this process by implementing request batching, which will allow multiple embedding requests to be combined into fewer API calls, thereby optimizing our use of provider resources and adhering to their request limits. This update is expected to be rolled out soon, eliminating the current limitation.\n",
|
||||
"\n",
|
||||
"***Note:*** Users may need to configure a proxy to utilize third-party embedding generation providers, excluding the 'database' provider that utilizes an ONNX model."
|
||||
]
|
||||
},
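Until that batching support lands, one workaround for per-minute request quotas is to pace embedding calls client-side. The wrapper below is a hypothetical sketch, not part of `OracleEmbeddings`; the class name and quota value are assumptions:

```python
import time
from typing import List

from langchain_core.embeddings import Embeddings


class PacedEmbeddings(Embeddings):
    """Hypothetical wrapper that spaces out calls to an inner Embeddings object."""

    def __init__(self, inner: Embeddings, requests_per_minute: int = 60):
        self.inner = inner
        self.delay = 60.0 / requests_per_minute

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        vectors = []
        for text in texts:
            vectors.append(self.inner.embed_query(text))  # one request per text, paced
            time.sleep(self.delay)
        return vectors

    def embed_query(self, text: str) -> List[float]:
        return self.inner.embed_query(text)
```

Something like `PacedEmbeddings(OracleEmbeddings(...), requests_per_minute=30)` could then be passed wherever an `Embeddings` instance is expected.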
@@ -128,11 +128,11 @@ def _load_package_modules(
    of the modules/packages are part of the package vs. 3rd party or built-in.

    Parameters:
        package_directory: Path to the package directory.
        submodule: Optional name of submodule to load.
        package_directory (Union[str, Path]): Path to the package directory.
        submodule (Optional[str]): Optional name of submodule to load.

    Returns:
        list: A list of loaded module objects.
        Dict[str, ModuleMembers]: A dictionary where keys are module names and values are ModuleMembers objects.
    """
    package_path = (
        Path(package_directory)
File diff suppressed because one or more lines are too long
docs/data/people.yml (1187 lines changed; diff suppressed because it is too large)
@@ -4,6 +4,9 @@ LangChain implements the latest research in the field of Natural Language Proces
|
||||
This page contains `arXiv` papers referenced in the LangChain Documentation, API Reference,
|
||||
Templates, and Cookbooks.
|
||||
|
||||
In the other direction, scientists use LangChain in their research and cite it in their papers.
|
||||
You can find [such papers](https://arxiv.org/search/?query=langchain&searchtype=all&source=header) here.
|
||||
|
||||
## Summary
|
||||
|
||||
| arXiv id / Title | Authors | Published date 🔻 | LangChain Documentation |
|
||||
@@ -21,21 +24,21 @@ This page contains `arXiv` papers referenced in the LangChain Documentation, API
|
||||
| `2305.08291v1` [Large Language Model Guided Tree-of-Thought](http://arxiv.org/abs/2305.08291v1) | Jieyi Long | 2023-05-15 | `API:` [langchain_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot), `Cookbook:` [tree_of_thought](https://github.com/langchain-ai/langchain/blob/master/cookbook/tree_of_thought.ipynb)
|
||||
| `2305.04091v3` [Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models](http://arxiv.org/abs/2305.04091v3) | Lei Wang, Wanyu Xu, Yihuai Lan, et al. | 2023-05-06 | `Cookbook:` [plan_and_execute_agent](https://github.com/langchain-ai/langchain/blob/master/cookbook/plan_and_execute_agent.ipynb)
|
||||
| `2304.08485v2` [Visual Instruction Tuning](http://arxiv.org/abs/2304.08485v2) | Haotian Liu, Chunyuan Li, Qingyang Wu, et al. | 2023-04-17 | `Cookbook:` [Semi_structured_and_multi_modal_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb), [Semi_structured_multi_modal_RAG_LLaMA2](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb)
|
||||
| `2304.03442v2` [Generative Agents: Interactive Simulacra of Human Behavior](http://arxiv.org/abs/2304.03442v2) | Joon Sung Park, Joseph C. O'Brien, Carrie J. Cai, et al. | 2023-04-07 | `Cookbook:` [multiagent_bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb), [generative_agents_interactive_simulacra_of_human_behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb)
|
||||
| `2304.03442v2` [Generative Agents: Interactive Simulacra of Human Behavior](http://arxiv.org/abs/2304.03442v2) | Joon Sung Park, Joseph C. O'Brien, Carrie J. Cai, et al. | 2023-04-07 | `Cookbook:` [generative_agents_interactive_simulacra_of_human_behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb), [multiagent_bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb)
|
||||
| `2303.17760v2` [CAMEL: Communicative Agents for "Mind" Exploration of Large Language Model Society](http://arxiv.org/abs/2303.17760v2) | Guohao Li, Hasan Abed Al Kader Hammoud, Hani Itani, et al. | 2023-03-31 | `Cookbook:` [camel_role_playing](https://github.com/langchain-ai/langchain/blob/master/cookbook/camel_role_playing.ipynb)
|
||||
| `2303.17580v4` [HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face](http://arxiv.org/abs/2303.17580v4) | Yongliang Shen, Kaitao Song, Xu Tan, et al. | 2023-03-30 | `API:` [langchain_experimental.autonomous_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents), `Cookbook:` [hugginggpt](https://github.com/langchain-ai/langchain/blob/master/cookbook/hugginggpt.ipynb)
|
||||
| `2303.08774v6` [GPT-4 Technical Report](http://arxiv.org/abs/2303.08774v6) | OpenAI, Josh Achiam, Steven Adler, et al. | 2023-03-15 | `Docs:` [docs/integrations/vectorstores/mongodb_atlas](https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas)
|
||||
| `2301.10226v4` [A Watermark for Large Language Models](http://arxiv.org/abs/2301.10226v4) | John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al. | 2023-01-24 | `API:` [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_huggingface.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community.llms...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI)
|
||||
| `2212.10496v1` [Precise Zero-Shot Dense Retrieval without Relevance Labels](http://arxiv.org/abs/2212.10496v1) | Luyu Gao, Xueguang Ma, Jimmy Lin, et al. | 2022-12-20 | `API:` [langchain.chains...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder), `Template:` [hyde](https://python.langchain.com/docs/templates/hyde), `Cookbook:` [hypothetical_document_embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb)
|
||||
| `2301.10226v4` [A Watermark for Large Language Models](http://arxiv.org/abs/2301.10226v4) | John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al. | 2023-01-24 | `API:` [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)
|
||||
| `2212.10496v1` [Precise Zero-Shot Dense Retrieval without Relevance Labels](http://arxiv.org/abs/2212.10496v1) | Luyu Gao, Xueguang Ma, Jimmy Lin, et al. | 2022-12-20 | `API:` [langchain...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder), `Template:` [hyde](https://python.langchain.com/docs/templates/hyde), `Cookbook:` [hypothetical_document_embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb)
|
||||
| `2212.07425v3` [Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments](http://arxiv.org/abs/2212.07425v3) | Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al. | 2022-12-12 | `API:` [langchain_experimental.fallacy_removal](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.fallacy_removal)
|
||||
| `2211.13892v2` [Complementary Explanations for Effective In-Context Learning](http://arxiv.org/abs/2211.13892v2) | Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al. | 2022-11-25 | `API:` [langchain_core.example_selectors...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)
|
||||
| `2211.10435v2` [PAL: Program-aided Language Models](http://arxiv.org/abs/2211.10435v2) | Luyu Gao, Aman Madaan, Shuyan Zhou, et al. | 2022-11-18 | `API:` [langchain_experimental.pal_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain), [langchain_experimental.pal_chain...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), `Cookbook:` [program_aided_language_model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb)
|
||||
| `2211.13892v2` [Complementary Explanations for Effective In-Context Learning](http://arxiv.org/abs/2211.13892v2) | Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al. | 2022-11-25 | `API:` [langchain_core...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)
|
||||
| `2211.10435v2` [PAL: Program-aided Language Models](http://arxiv.org/abs/2211.10435v2) | Luyu Gao, Aman Madaan, Shuyan Zhou, et al. | 2022-11-18 | `API:` [langchain_experimental...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), [langchain_experimental.pal_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain), `Cookbook:` [program_aided_language_model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb)
|
||||
| `2209.10785v2` [Deep Lake: a Lakehouse for Deep Learning](http://arxiv.org/abs/2209.10785v2) | Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al. | 2022-09-22 | `Docs:` [docs/integrations/providers/activeloop_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake)
|
||||
| `2205.12654v1` [Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages](http://arxiv.org/abs/2205.12654v1) | Kevin Heffernan, Onur Çelebi, Holger Schwenk | 2022-05-25 | `API:` [langchain_community.embeddings...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)
|
||||
| `2204.00498v1` [Evaluating the Text-to-SQL Capabilities of Large Language Models](http://arxiv.org/abs/2204.00498v1) | Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau | 2022-03-15 | `API:` [langchain_community.utilities...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL), [langchain_community.utilities...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase)
|
||||
| `2202.00666v5` [Locally Typical Sampling](http://arxiv.org/abs/2202.00666v5) | Clara Meister, Tiago Pimentel, Gian Wiher, et al. | 2022-02-01 | `API:` [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_huggingface.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)
|
||||
| `2205.12654v1` [Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages](http://arxiv.org/abs/2205.12654v1) | Kevin Heffernan, Onur Çelebi, Holger Schwenk | 2022-05-25 | `API:` [langchain_community...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)
|
||||
| `2204.00498v1` [Evaluating the Text-to-SQL Capabilities of Large Language Models](http://arxiv.org/abs/2204.00498v1) | Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau | 2022-03-15 | `API:` [langchain_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL), [langchain_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase)
|
||||
| `2202.00666v5` [Locally Typical Sampling](http://arxiv.org/abs/2202.00666v5) | Clara Meister, Tiago Pimentel, Gian Wiher, et al. | 2022-02-01 | `API:` [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)
|
||||
| `2103.00020v1` [Learning Transferable Visual Models From Natural Language Supervision](http://arxiv.org/abs/2103.00020v1) | Alec Radford, Jong Wook Kim, Chris Hallacy, et al. | 2021-02-26 | `API:` [langchain_experimental.open_clip](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.open_clip)
|
||||
| `1909.05858v2` [CTRL: A Conditional Transformer Language Model for Controllable Generation](http://arxiv.org/abs/1909.05858v2) | Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al. | 2019-09-11 | `API:` [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_huggingface.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)
|
||||
| `1909.05858v2` [CTRL: A Conditional Transformer Language Model for Controllable Generation](http://arxiv.org/abs/1909.05858v2) | Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al. | 2019-09-11 | `API:` [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)
|
||||
| `1908.10084v1` [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](http://arxiv.org/abs/1908.10084v1) | Nils Reimers, Iryna Gurevych | 2019-08-27 | `Docs:` [docs/integrations/text_embedding/sentence_transformers](https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers)
|
||||
|
||||
## Self-Discover: Large Language Models Self-Compose Reasoning Structures
|
||||
@@ -415,7 +418,7 @@ publicly available.
|
||||
- **URL:** http://arxiv.org/abs/2304.03442v2
|
||||
- **LangChain:**
|
||||
|
||||
- **Cookbook:** [multiagent_bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb), [generative_agents_interactive_simulacra_of_human_behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb)
|
||||
- **Cookbook:** [generative_agents_interactive_simulacra_of_human_behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb), [multiagent_bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb)
|
||||
|
||||
**Abstract:** Believable proxies of human behavior can empower interactive applications
|
||||
ranging from immersive environments to rehearsal spaces for interpersonal
|
||||
@@ -537,7 +540,7 @@ more than 1/1,000th the compute of GPT-4.
|
||||
- **URL:** http://arxiv.org/abs/2301.10226v4
|
||||
- **LangChain:**
|
||||
|
||||
- **API Reference:** [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_huggingface.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community.llms...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI)
|
||||
- **API Reference:** [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)
|
||||
|
||||
**Abstract:** Potential harms of large language models can be mitigated by watermarking
|
||||
model output, i.e., embedding signals into generated text that are invisible to
|
||||
@@ -562,7 +565,7 @@ family, and discuss robustness and security.
|
||||
- **URL:** http://arxiv.org/abs/2212.10496v1
|
||||
- **LangChain:**
|
||||
|
||||
- **API Reference:** [langchain.chains...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder)
|
||||
- **API Reference:** [langchain...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder)
|
||||
- **Template:** [hyde](https://python.langchain.com/docs/templates/hyde)
|
||||
- **Cookbook:** [hypothetical_document_embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb)
|
||||
|
||||
@@ -626,7 +629,7 @@ further work on logical fallacy identification.
|
||||
- **URL:** http://arxiv.org/abs/2211.13892v2
|
||||
- **LangChain:**
|
||||
|
||||
- **API Reference:** [langchain_core.example_selectors...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)
|
||||
- **API Reference:** [langchain_core...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)
|
||||
|
||||
**Abstract:** Large language models (LLMs) have exhibited remarkable capabilities in
|
||||
learning from explanations in prompts, but there has been limited understanding
|
||||
@@ -654,7 +657,7 @@ performance across three real-world tasks on multiple LLMs.
|
||||
- **URL:** http://arxiv.org/abs/2211.10435v2
|
||||
- **LangChain:**
|
||||
|
||||
- **API Reference:** [langchain_experimental.pal_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain), [langchain_experimental.pal_chain...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain)
|
||||
- **API Reference:** [langchain_experimental...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), [langchain_experimental.pal_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain)
|
||||
- **Cookbook:** [program_aided_language_model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb)
|
||||
|
||||
**Abstract:** Large language models (LLMs) have recently demonstrated an impressive ability
|
||||
@@ -717,7 +720,7 @@ TensorFlow, JAX, and integrate with numerous MLOps tools.
|
||||
- **URL:** http://arxiv.org/abs/2205.12654v1
|
||||
- **LangChain:**
|
||||
|
||||
- **API Reference:** [langchain_community.embeddings...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)
|
||||
- **API Reference:** [langchain_community...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)
|
||||
|
||||
**Abstract:** Scaling multilingual representation learning beyond the hundred most frequent
|
||||
languages is challenging, in particular to cover the long tail of low-resource
|
||||
@@ -746,7 +749,7 @@ encoders, mine bitexts, and validate the bitexts by training NMT systems.
|
||||
- **URL:** http://arxiv.org/abs/2204.00498v1
|
||||
- **LangChain:**
|
||||
|
||||
- **API Reference:** [langchain_community.utilities...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL), [langchain_community.utilities...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase)
|
||||
- **API Reference:** [langchain_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL), [langchain_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase)
|
||||
|
||||
**Abstract:** We perform an empirical evaluation of Text-to-SQL capabilities of the Codex
|
||||
language model. We find that, without any finetuning, Codex is a strong
|
||||
@@ -765,7 +768,7 @@ few-shot examples.
|
||||
- **URL:** http://arxiv.org/abs/2202.00666v5
|
||||
- **LangChain:**
|
||||
|
||||
- **API Reference:** [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_huggingface.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)
|
||||
- **API Reference:** [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)
|
||||
|
||||
**Abstract:** Today's probabilistic language generators fall short when it comes to
|
||||
producing coherent and fluent text despite the fact that the underlying models
|
||||
@@ -829,7 +832,7 @@ https://github.com/OpenAI/CLIP.
|
||||
- **URL:** http://arxiv.org/abs/1909.05858v2
|
||||
- **LangChain:**
|
||||
|
||||
- **API Reference:** [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_huggingface.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)
|
||||
- **API Reference:** [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)
|
||||
|
||||
**Abstract:** Large-scale language models show promising text generation capabilities, but
|
||||
users cannot easily control particular aspects of the generated text. We
|
||||
|
||||
@@ -38,7 +38,7 @@ All dependencies in this package are optional to keep the package as lightweight
|
||||
`langgraph` is an extension of `langchain` aimed at
|
||||
building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
|
||||
|
||||
LangGraph exposes high level interfaces for creating common types of agents, as well as a low-level API for constructing more contr
|
||||
LangGraph exposes high level interfaces for creating common types of agents, as well as a low-level API for composing custom flows.
|
||||
|
||||
### [`langserve`](/docs/langserve)
|
||||
|
||||
@@ -58,6 +58,7 @@ A developer platform that lets you debug, test, evaluate, and monitor LLM applic
|
||||
/>
|
||||
|
||||
## LangChain Expression Language (LCEL)
|
||||
<span data-heading-keywords="lcel"></span>
|
||||
|
||||
LangChain Expression Language, or LCEL, is a declarative way to chain LangChain components.
|
||||
LCEL was designed from day 1 to **support putting prototypes in production, with no code changes**, from the simplest “prompt + LLM” chain to the most complex chains (we’ve seen folks successfully run LCEL chains with 100s of steps in production). To highlight a few of the reasons you might want to use LCEL:
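As a minimal sketch (assuming `langchain-openai` is installed and `OPENAI_API_KEY` is set), an LCEL chain is just components composed with the `|` operator:

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

prompt = ChatPromptTemplate.from_template("Tell me a short joke about {topic}")
model = ChatOpenAI(model="gpt-3.5-turbo-0125")
parser = StrOutputParser()

# Each component is a Runnable; `|` composes them into a new Runnable.
chain = prompt | model | parser
chain.invoke({"topic": "bears"})
```

The composed chain exposes the same `invoke`, `stream`, and `batch` interface as its parts.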
|
||||
@@ -88,6 +89,7 @@ With LCEL, **all** steps are automatically logged to [LangSmith](https://docs.sm
|
||||
Any chain created with LCEL can be easily deployed using [LangServe](/docs/langserve).
|
||||
|
||||
### Runnable interface
|
||||
<span data-heading-keywords="invoke"></span>
|
||||
|
||||
To make it as easy as possible to create custom chains, we've implemented a ["Runnable"](https://api.python.langchain.com/en/stable/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable) protocol. Many LangChain components implement the `Runnable` protocol, including chat models, LLMs, output parsers, retrievers, prompt templates, and more. There are also several useful primitives for working with runnables, which you can read about below.
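A small sketch of that shared interface, using `RunnableLambda` to wrap an ordinary function:

```python
from langchain_core.runnables import RunnableLambda

# Wrap a plain function so it exposes the Runnable interface.
doubler = RunnableLambda(lambda x: x * 2)

doubler.invoke(3)          # single input -> 6
doubler.batch([1, 2, 3])   # list of inputs, run in parallel -> [2, 4, 6]
for chunk in doubler.stream(4):
    print(chunk)           # yields output chunks (here a single chunk: 8)
```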
|
||||
|
||||
@@ -128,6 +130,7 @@ LangChain provides standard, extendable interfaces and external integrations for
|
||||
Some components LangChain implements, some components we rely on third-party integrations for, and others are a mix.
|
||||
|
||||
### Chat models
|
||||
<span data-heading-keywords="chat model,chat models"></span>
|
||||
|
||||
Language models that use a sequence of messages as inputs and return chat messages as outputs (as opposed to using plain text).
|
||||
These are traditionally newer models (older models are generally `LLMs`, see below).
|
||||
@@ -150,7 +153,10 @@ Generally, such models are better at tool calling than non-fine-tuned models, an
|
||||
Please see the [tool calling section](/docs/concepts/#functiontool-calling) for more information.
|
||||
:::
|
||||
|
||||
For specifics on how to use chat models, see the [relevant how-to guides here](/docs/how_to/#chat-models).
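A minimal usage sketch, assuming `langchain-openai` is installed and an API key is configured (any chat model integration follows the same pattern):

```python
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI

model = ChatOpenAI(model="gpt-3.5-turbo-0125")
messages = [
    SystemMessage(content="You are a helpful assistant."),
    HumanMessage(content="What is LangChain?"),
]
# Chat models take a list of messages and return an AIMessage.
ai_message = model.invoke(messages)
ai_message.content
```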
|
||||
|
||||
### LLMs
|
||||
<span data-heading-keywords="llm,llms"></span>
|
||||
|
||||
Language models that take a string as input and return a string.
These are traditionally older models (newer models generally are `ChatModels`, see above).
|
||||
@@ -161,6 +167,8 @@ When messages are passed in as input, they will be formatted into a string under
|
||||
|
||||
LangChain does not provide any LLMs, rather we rely on third party integrations.
|
||||
|
||||
For specifics on how to use LLMs, see the [relevant how-to guides here](/docs/how_to/#llms).
|
||||
|
||||
### Messages
|
||||
|
||||
Some language models take a list of messages as input and return a message.
|
||||
@@ -214,6 +222,8 @@ This represents the result of a tool call. This is distinct from a FunctionMessa
|
||||
|
||||
|
||||
### Prompt templates
|
||||
<span data-heading-keywords="prompt,prompttemplate,chatprompttemplate"></span>
|
||||
|
||||
Prompt templates help to translate user input and parameters into instructions for a language model.
|
||||
This can be used to guide a model's response, helping it understand the context and generate relevant and coherent language-based output.
|
||||
|
||||
@@ -222,7 +232,7 @@ Prompt Templates take as input a dictionary, where each key represents a variabl
|
||||
Prompt Templates output a PromptValue. This PromptValue can be passed to an LLM or a ChatModel, and can also be cast to a string or a list of messages.
|
||||
The reason this PromptValue exists is to make it easy to switch between strings and messages.
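For example (the topic value here is arbitrary):

```python
from langchain_core.prompts import ChatPromptTemplate

prompt_template = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant."),
    ("user", "Tell me about {topic}"),
])

prompt_value = prompt_template.invoke({"topic": "whales"})
prompt_value.to_messages()  # a list of messages, for chat models
prompt_value.to_string()    # a single string, for string-in/string-out LLMs
```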
|
||||
|
||||
There are a few different types of prompt templates
|
||||
There are a few different types of prompt templates:
|
||||
|
||||
#### String PromptTemplates
|
||||
|
||||
@@ -258,6 +268,7 @@ The first is a system message, that has no variables to format.
|
||||
The second is a HumanMessage, and will be formatted by the `topic` variable the user passes in.
|
||||
|
||||
#### MessagesPlaceholder
|
||||
<span data-heading-keywords="messagesplaceholder"></span>
|
||||
|
||||
This prompt template is responsible for adding a list of messages in a particular place.
|
||||
In the above ChatPromptTemplate, we saw how we could format two messages, each one a string.
|
||||
@@ -289,14 +300,18 @@ prompt_template = ChatPromptTemplate.from_messages([
|
||||
])
|
||||
```
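One way the template above might look in full (the variable name `msgs` and the message contents are illustrative):

```python
from langchain_core.messages import HumanMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

prompt_template = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant."),
    # A whole list of messages is injected here when the template is formatted.
    MessagesPlaceholder(variable_name="msgs"),
])

prompt_template.invoke({"msgs": [HumanMessage(content="hi!")]})
```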
|
||||
|
||||
For specifics on how to use prompt templates, see the [relevant how-to guides here](/docs/how_to/#prompt-templates).
|
||||
|
||||
### Example selectors
|
||||
One common prompting technique for achieving better performance is to include examples as part of the prompt.
|
||||
This gives the language model concrete examples of how it should behave.
|
||||
Sometimes these examples are hardcoded into the prompt, but for more advanced situations it may be nice to dynamically select them.
|
||||
Example Selectors are classes responsible for selecting and then formatting examples into prompts.
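As an illustrative sketch, a custom selector only needs to implement `add_example` and `select_examples`; the length-based heuristic below is arbitrary:

```python
from langchain_core.example_selectors import BaseExampleSelector


class ClosestLengthExampleSelector(BaseExampleSelector):
    """Picks the two examples whose input length is closest to the new input."""

    def __init__(self, examples):
        self.examples = examples

    def add_example(self, example):
        self.examples.append(example)

    def select_examples(self, input_variables):
        target = len(input_variables["input"])
        ranked = sorted(self.examples, key=lambda e: abs(len(e["input"]) - target))
        return ranked[:2]


selector = ClosestLengthExampleSelector(
    [{"input": "hi", "output": "ciao"}, {"input": "goodbye", "output": "arrivederci"}]
)
selector.select_examples({"input": "hey"})
```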
|
||||
|
||||
For specifics on how to use example selectors, see the [relevant how-to guides here](/docs/how_to/#example-selectors).
|
||||
|
||||
### Output parsers
|
||||
<span data-heading-keywords="output parser"></span>
|
||||
|
||||
:::note
|
||||
|
||||
@@ -340,6 +355,8 @@ LangChain has lots of different types of output parsers. This is a list of outpu
|
||||
| [Datetime](https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.datetime.DatetimeOutputParser.html#langchain.output_parsers.datetime.DatetimeOutputParser) | | ✅ | | `str` \| `Message` | `datetime.datetime` | Parses response into a datetime string. |
|
||||
| [Structured](https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.structured.StructuredOutputParser.html#langchain.output_parsers.structured.StructuredOutputParser) | | ✅ | | `str` \| `Message` | `Dict[str, str]` | An output parser that returns structured information. It is less powerful than other output parsers since it only allows for fields to be strings. This can be useful when you are working with smaller LLMs. |
|
||||
|
||||
For specifics on how to use output parsers, see the [relevant how-to guides here](/docs/how_to/#output-parsers).
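For instance, the Datetime parser listed above can be used like this (the timestamp is an example value):

```python
from langchain.output_parsers import DatetimeOutputParser

parser = DatetimeOutputParser()
# The format instructions are typically appended to the prompt so the model
# replies in a format the parser can handle.
parser.get_format_instructions()
# The parser then turns the raw model text into a datetime object.
parser.parse("2023-07-04T14:30:00.000000Z")
```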
|
||||
|
||||
### Chat history
|
||||
Most LLM applications have a conversational interface.
|
||||
An essential component of a conversation is being able to refer to information introduced earlier in the conversation.
|
||||
@@ -350,6 +367,7 @@ This `ChatHistory` will keep track of inputs and outputs of the underlying chain
|
||||
Future interactions will then load those messages and pass them into the chain as part of the input.
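A rough sketch of this pattern with an in-memory history (assuming `langchain-openai` and `langchain-community` are installed; the session id is arbitrary):

```python
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_core.messages import HumanMessage
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_openai import ChatOpenAI

store = {}  # maps session_id -> ChatMessageHistory


def get_session_history(session_id: str) -> ChatMessageHistory:
    if session_id not in store:
        store[session_id] = ChatMessageHistory()
    return store[session_id]


chain_with_history = RunnableWithMessageHistory(
    ChatOpenAI(model="gpt-3.5-turbo-0125"), get_session_history
)

config = {"configurable": {"session_id": "demo"}}
chain_with_history.invoke([HumanMessage(content="Hi, I'm Bob.")], config=config)
# The earlier exchange is loaded from the history and passed back into the chain.
chain_with_history.invoke([HumanMessage(content="What's my name?")], config=config)
```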
|
||||
|
||||
### Documents
|
||||
<span data-heading-keywords="document,documents"></span>
|
||||
|
||||
A Document object in LangChain contains information about some data. It has two attributes:
|
||||
|
||||
@@ -357,6 +375,7 @@ A Document object in LangChain contains information about some data. It has two
|
||||
- `metadata: dict`: Arbitrary metadata associated with this document. Can track the document id, file name, etc.
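For example:

```python
from langchain_core.documents import Document

doc = Document(
    page_content="LangChain is a framework for building applications with LLMs.",
    metadata={"source": "example.txt", "page": 1},
)
```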
|
||||
|
||||
### Document loaders
|
||||
<span data-heading-keywords="document loader,document loaders"></span>
|
||||
|
||||
These classes load Document objects. LangChain has hundreds of integrations with various data sources to load data from: Slack, Notion, Google Drive, etc.
|
||||
|
||||
@@ -372,6 +391,8 @@ loader = CSVLoader(
|
||||
data = loader.load()
|
||||
```
|
||||
|
||||
For specifics on how to use document loaders, see the [relevant how-to guides here](/docs/how_to/#document-loaders).
|
||||
|
||||
### Text splitters
|
||||
|
||||
Once you've loaded documents, you'll often want to transform them to better suit your application. The simplest example is that you may want to split a long document into smaller chunks that can fit into your model's context window. LangChain has a number of built-in document transformers that make it easy to split, combine, filter, and otherwise manipulate documents.
|
||||
@@ -389,14 +410,22 @@ That means there are two different axes along which you can customize your text
|
||||
1. How the text is split
|
||||
2. How the chunk size is measured
|
||||
|
||||
For specifics on how to use text splitters, see the [relevant how-to guides here](/docs/how_to/#text-splitters).
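A brief sketch using the recursive character splitter (the chunk sizes here are arbitrary):

```python
from langchain_text_splitters import RecursiveCharacterTextSplitter

text_splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=20)
chunks = text_splitter.split_text(
    "LangChain has many document transformers. Splitting long text into "
    "overlapping chunks keeps each piece within the model's context window."
)
```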
|
||||
|
||||
### Embedding models
|
||||
<span data-heading-keywords="embedding,embeddings"></span>
|
||||
|
||||
The Embeddings class is designed for interfacing with text embedding models. There are lots of embedding model providers (OpenAI, Cohere, Hugging Face, etc.), and this class provides a standard interface for all of them.
|
||||
|
||||
Embeddings create a vector representation of a piece of text. This is useful because it means we can think about text in the vector space, and do things like semantic search where we look for pieces of text that are most similar in the vector space.
|
||||
|
||||
The base Embeddings class in LangChain provides two methods: one for embedding documents and one for embedding a query. The former takes as input multiple texts, while the latter takes a single text. The reason for having these as two separate methods is that some embedding providers have different embedding methods for documents (to be searched over) vs queries (the search query itself).
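For example, with the OpenAI integration (assuming `langchain-openai` is installed and an API key is set):

```python
from langchain_openai import OpenAIEmbeddings

embeddings_model = OpenAIEmbeddings()

# Embed several documents (the texts to be searched over)...
doc_vectors = embeddings_model.embed_documents(["Hello world", "Goodbye world"])
# ...and embed a single query (the search query itself).
query_vector = embeddings_model.embed_query("What was said about the world?")
```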
|
||||
|
||||
For specifics on how to use embedding models, see the [relevant how-to guides here](/docs/how_to/#embedding-models).
|
||||
|
||||
### Vector stores
|
||||
<span data-heading-keywords="vector,vectorstore,vectorstores,vector store,vector stores"></span>
|
||||
|
||||
One of the most common ways to store and search over unstructured data is to embed it and store the resulting embedding vectors,
|
||||
and then at query time to embed the unstructured query and retrieve the embedding vectors that are 'most similar' to the embedded query.
|
||||
A vector store takes care of storing embedded data and performing vector search for you.
|
||||
@@ -408,7 +437,11 @@ vectorstore = MyVectorStore()
|
||||
retriever = vectorstore.as_retriever()
|
||||
```
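As a more concrete sketch using FAISS (assumes `faiss-cpu`, `langchain-community`, and `langchain-openai` are installed; the texts are placeholders):

```python
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings

vectorstore = FAISS.from_texts(
    ["LangChain helps build LLM apps", "Vector stores enable semantic search"],
    embedding=OpenAIEmbeddings(),
)
# Returns the stored texts most similar in meaning to the query.
vectorstore.similarity_search("how do I search by meaning?", k=1)
```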
|
||||
|
||||
For specifics on how to use vector stores, see the [relevant how-to guides here](/docs/how_to/#vector-stores).
|
||||
|
||||
### Retrievers
|
||||
<span data-heading-keywords="retriever,retrievers"></span>
|
||||
|
||||
A retriever is an interface that returns documents given an unstructured query.
|
||||
It is more general than a vector store.
|
||||
A retriever does not need to be able to store documents, only to return (or retrieve) them.
|
||||
@@ -416,7 +449,10 @@ Retrievers can be created from vectorstores, but are also broad enough to includ
|
||||
|
||||
Retrievers accept a string query as input and return a list of Documents as output.
|
||||
|
||||
For specifics on how to use retrievers, see the [relevant how-to guides here](/docs/how_to/#retrievers).
|
||||
|
||||
### Tools
|
||||
<span data-heading-keywords="tool,tools"></span>
|
||||
|
||||
Tools are interfaces that an agent, a chain, or a chat model / LLM can use to interact with the world.
|
||||
|
||||
@@ -442,6 +478,8 @@ Generally, when designing tools to be used by a chat model or LLM, it is importa
|
||||
- Models will perform better if the tools have well-chosen names, descriptions, and JSON schemas.
|
||||
- Simpler tools are generally easier for models to use than more complex tools.
|
||||
|
||||
For specifics on how to use tools, see the [relevant how-to guides here](/docs/how_to/#tools).
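A small sketch of defining a tool with the `@tool` decorator (the function itself is arbitrary):

```python
from langchain_core.tools import tool


@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b


multiply.name         # "multiply"
multiply.description  # derived from the signature and docstring
multiply.invoke({"a": 2, "b": 3})  # 6
```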
|
||||
|
||||
### Toolkits
|
||||
|
||||
Toolkits are collections of tools that are designed to be used together for specific tasks. They have convenient loading methods.
|
||||
@@ -461,7 +499,7 @@ tools = toolkit.get_tools()
|
||||
|
||||
By themselves, language models can't take actions - they just output text.
|
||||
A big use case for LangChain is creating **agents**.
|
||||
Agents are systems that use an LLM as a reasoning enginer to determine which actions to take and what the inputs to those actions should be.
|
||||
Agents are systems that use an LLM as a reasoning engine to determine which actions to take and what the inputs to those actions should be.
|
||||
The results of those actions can then be fed back into the agent, and it determines whether more actions are needed or whether it is okay to finish.
|
||||
|
||||
[LangGraph](https://github.com/langchain-ai/langgraph) is an extension of LangChain specifically aimed at creating highly controllable and customizable agents.
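A rough sketch of a prebuilt LangGraph agent (assuming `langgraph` and `langchain-openai` are installed; the tool and model choice are illustrative):

```python
from langchain_core.messages import HumanMessage
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.prebuilt import create_react_agent


@tool
def get_word_length(word: str) -> int:
    """Return the number of characters in a word."""
    return len(word)


agent = create_react_agent(ChatOpenAI(model="gpt-4o"), [get_word_length])
agent.invoke({"messages": [HumanMessage(content="How many letters are in 'LangChain'?")]})
```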
|
||||
@@ -474,7 +512,7 @@ In order to solve that we built LangGraph to be this flexible, highly-controllab
|
||||
|
||||
If you are still using AgentExecutor, do not fear: we still have a guide on [how to use AgentExecutor](/docs/how_to/agent_executor).
|
||||
It is recommended, however, that you start to transition to LangGraph.
|
||||
In order to assist in this we have put together a [transition guide on how to do so](/docs/how_to/migrate_agent)
|
||||
In order to assist in this we have put together a [transition guide on how to do so](/docs/how_to/migrate_agent).
|
||||
|
||||
### Multimodal
|
||||
|
||||
@@ -482,6 +520,8 @@ Some models are multimodal, accepting images, audio and even video as inputs. Th
|
||||
|
||||
In LangChain, most chat models that support multimodal inputs also accept those values in OpenAI's content blocks format. So far this is restricted to image inputs. For models like Gemini which support video and other bytes input, the APIs also support the native, model-specific representations.
|
||||
|
||||
For specifics on how to use multimodal models, see the [relevant how-to guides here](/docs/how_to/#multimodal).
|
||||
|
||||
### Callbacks
|
||||
|
||||
LangChain provides a callbacks system that allows you to hook into the various stages of your LLM application. This is useful for logging, monitoring, streaming, and other tasks.
|
||||
@@ -552,6 +592,8 @@ This is a common reason why you may fail to see events being emitted from custom
|
||||
runnables or tools.
|
||||
:::
|
||||
|
||||
For specifics on how to use callbacks, see the [relevant how-to guides here](/docs/how_to/#callbacks).
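An illustrative handler that hooks two of those stages (the print statements stand in for real logging; assumes `langchain-openai` is installed):

```python
from typing import Any, Dict, List

from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.messages import BaseMessage
from langchain_openai import ChatOpenAI


class LoggingHandler(BaseCallbackHandler):
    def on_chat_model_start(
        self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs: Any
    ) -> None:
        print("Chat model started")

    def on_llm_end(self, response: Any, **kwargs: Any) -> None:
        print("Chat model ended")


llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
# Callbacks can be passed at invocation time via the config.
llm.invoke("hello", config={"callbacks": [LoggingHandler()]})
```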
|
||||
|
||||
## Techniques
|
||||
|
||||
### Function/tool calling
|
||||
@@ -623,6 +665,7 @@ LangChain provides several advanced retrieval types. A full list is below, along
|
||||
| [Multi-Query Retriever](/docs/how_to/MultiQueryRetriever/) | Any | Yes | If users are asking questions that are complex and require multiple pieces of distinct information to respond | This uses an LLM to generate multiple queries from the original one. This is useful when the original query needs pieces of information about multiple topics to be properly answered. By generating multiple queries, we can then fetch documents for each of them. |
|
||||
| [Ensemble](/docs/how_to/ensemble_retriever/) | Any | No | If you have multiple retrieval methods and want to try combining them. | This fetches documents from multiple retrievers and then combines them. |
|
||||
|
||||
For a high-level guide on retrieval, see this [tutorial on RAG](/docs/tutorials/rag/).
|
||||
|
||||
### Text splitting
|
||||
|
||||
|
||||
@@ -206,9 +206,7 @@ ignore-words-list = 'momento,collison,ned,foor,reworkd,parth,whats,aapply,mysogy
|
||||
|
||||
`langchain-core` and partner packages **do not use** optional dependencies in this way.
|
||||
|
||||
You only need to add a new dependency if a **unit test** relies on the package.
|
||||
If your package is only required for **integration tests**, then you can skip these
|
||||
steps and leave all pyproject.toml and poetry.lock files alone.
|
||||
You'll notice that `pyproject.toml` and `poetry.lock` are **not** touched when you add optional dependencies below.
|
||||
|
||||
If you're adding a new dependency to LangChain, assume that it will be an optional dependency, and
|
||||
that most users won't have it installed.
|
||||
@@ -216,20 +214,12 @@ that most users won't have it installed.
|
||||
Users who do not have the dependency installed should be able to **import** your code without
|
||||
any side effects (no warnings, no errors, no exceptions).
|
||||
|
||||
To introduce the dependency to the pyproject.toml file correctly, please do the following:
|
||||
To introduce the dependency to a library, please do the following:
|
||||
|
||||
1. Add the dependency to the main group as an optional dependency
|
||||
```bash
|
||||
poetry add --optional [package_name]
|
||||
```
|
||||
2. Open pyproject.toml and add the dependency to the `extended_testing` extra
|
||||
3. Relock the poetry file to update the extra.
|
||||
```bash
|
||||
poetry lock --no-update
|
||||
```
|
||||
4. Add a unit test that the very least attempts to import the new code. Ideally, the unit
|
||||
1. Open extended_testing_deps.txt and add the dependency
|
||||
2. Add a unit test that at the very least attempts to import the new code. Ideally, the unit
|
||||
test makes use of lightweight fixtures to test the logic of the code.
|
||||
5. Please use the `@pytest.mark.requires(package_name)` decorator for any tests that require the dependency.
|
||||
3. Please use the `@pytest.mark.requires(package_name)` decorator for any unit tests that require the dependency.
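A sketch of what such a test might look like (the package and module names below are placeholders, not real LangChain modules):

```python
import pytest


@pytest.mark.requires("my_new_dependency")  # placeholder package name
def test_new_integration_imports() -> None:
    # At the very least, confirm the new code can be imported when the
    # optional dependency is available.
    from langchain_community.utilities.my_new_module import MyNewWrapper  # placeholder

    assert MyNewWrapper is not None
```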
|
||||
|
||||
## Adding a Jupyter Notebook
|
||||
|
||||
|
||||
BIN docs/docs/example_data/nke-10k-2023.pdf (new file, binary file not shown)
@@ -15,7 +15,11 @@
|
||||
"id": "f4c03f40-1328-412d-8a48-1db0cd481b77",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Build an Agent\n",
|
||||
"# Build an Agent with AgentExecutor (Legacy)\n",
|
||||
"\n",
|
||||
":::{.callout-important}\n",
|
||||
"This section will cover building with the legacy LangChain AgentExecutor. These are fine for getting started, but past a certain point, you will likely want flexibility and control that they do not offer. For working with more advanced agents, we'd recommend checking out [LangGraph Agents](/docs/concepts/#langgraph) or the [migration guide](/docs/how_to/migrate_agent/)\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"By themselves, language models can't take actions - they just output text.\n",
|
||||
"A big use case for LangChain is creating **agents**.\n",
|
||||
@@ -24,10 +28,6 @@
|
||||
"\n",
|
||||
"In this tutorial, we will build an agent that can interact with multiple different tools: one being a local database, the other being a search engine. You will be able to ask this agent questions, watch it call tools, and have conversations with it.\n",
|
||||
"\n",
|
||||
":::{.callout-important}\n",
|
||||
"This section will cover building with LangChain Agents. LangChain Agents are fine for getting started, but past a certain point, you will likely want flexibility and control that they do not offer. For working with more advanced agents, we'd reccommend checking out [LangGraph](/docs/concepts/#langgraph)\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"## Concepts\n",
|
||||
"\n",
|
||||
"Concepts we will cover are:\n",
|
||||
|
||||
@@ -1,5 +1,19 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "f781411d",
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "raw"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"---\n",
|
||||
"keywords: [charactertextsplitter]\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c3ee8d00",
|
||||
|
||||
docs/docs/how_to/chat_models_universal_init.ipynb (new file, 157 lines)
@@ -0,0 +1,157 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "cfdf4f09-8125-4ed1-8063-6feed57da8a3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to let your end users choose their model\n",
|
||||
"\n",
|
||||
"Many LLM applications let end users specify what model provider and model they want the application to be powered by. This requires writing some logic to initialize different ChatModels based on some user configuration. The `init_chat_model()` helper method makes it easy to initialize a number of different model integrations without having to worry about import paths and class names.\n",
|
||||
"\n",
|
||||
":::tip Supported models\n",
|
||||
"\n",
|
||||
"See the [init_chat_model()](https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.base.init_chat_model.html) API reference for a full list of supported integrations.\n",
|
||||
"\n",
|
||||
"Make sure you have the integration packages installed for any model providers you want to support. E.g. you should have `langchain-openai` installed to init an OpenAI model.\n",
|
||||
"\n",
|
||||
":::"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "165b0de6-9ae3-4e3d-aa98-4fc8a97c4a06",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain langchain-openai langchain-anthropic langchain-google-vertexai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ea2c9f57-a796-45f8-b6f4-3efd3f361a9b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Basic usage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "79e14913-803c-4382-9009-5c6af3d75d35",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"GPT-4o: I'm an AI created by OpenAI, and I don't have a personal name. You can call me Assistant! How can I help you today?\n",
|
||||
"\n",
|
||||
"Claude Opus: My name is Claude. It's nice to meet you!\n",
|
||||
"\n",
|
||||
"Gemini 1.5: I am a large language model, trained by Google. I do not have a name. \n",
|
||||
"\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chat_models import init_chat_model\n",
|
||||
"\n",
|
||||
"# Returns a langchain_openai.ChatOpenAI instance.\n",
|
||||
"gpt_4o = init_chat_model(\"gpt-4o\", model_provider=\"openai\", temperature=0)\n",
|
||||
"# Returns a langchain_anthropic.ChatAnthropic instance.\n",
|
||||
"claude_opus = init_chat_model(\n",
|
||||
" \"claude-3-opus-20240229\", model_provider=\"anthropic\", temperature=0\n",
|
||||
")\n",
|
||||
"# Returns a langchain_google_vertexai.ChatVertexAI instance.\n",
|
||||
"gemini_15 = init_chat_model(\n",
|
||||
" \"gemini-1.5-pro\", model_provider=\"google_vertexai\", temperature=0\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Since all model integrations implement the ChatModel interface, you can use them in the same way.\n",
|
||||
"print(\"GPT-4o: \" + gpt_4o.invoke(\"what's your name\").content + \"\\n\")\n",
|
||||
"print(\"Claude Opus: \" + claude_opus.invoke(\"what's your name\").content + \"\\n\")\n",
|
||||
"print(\"Gemini 1.5: \" + gemini_15.invoke(\"what's your name\").content + \"\\n\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "fff9a4c8-b6ee-4a1a-8d3d-0ecaa312d4ed",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Simple config example"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "75c25d39-bf47-4b51-a6c6-64d9c572bfd6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"user_config = {\n",
|
||||
" \"model\": \"...user-specified...\",\n",
|
||||
" \"model_provider\": \"...user-specified...\",\n",
|
||||
" \"temperature\": 0,\n",
|
||||
" \"max_tokens\": 1000,\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"llm = init_chat_model(**user_config)\n",
|
||||
"llm.invoke(\"what's your name\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f811f219-5e78-4b62-b495-915d52a22532",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Inferring model provider\n",
|
||||
"\n",
|
||||
"For common and distinct model names `init_chat_model()` will attempt to infer the model provider. See the [API reference](https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.base.init_chat_model.html) for a full list of inference behavior. E.g. any model that starts with `gpt-3...` or `gpt-4...` will be inferred as using model provider `openai`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "0378ccc6-95bc-4d50-be50-fccc193f0a71",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"gpt_4o = init_chat_model(\"gpt-4o\", temperature=0)\n",
|
||||
"claude_opus = init_chat_model(\"claude-3-opus-20240229\", temperature=0)\n",
|
||||
"gemini_15 = init_chat_model(\"gemini-1.5-pro\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "da07b5c0-d2e6-42e4-bfcd-2efcfaae6221",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv-2",
|
||||
"language": "python",
|
||||
"name": "poetry-venv-2"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -14,35 +14,51 @@
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"Tracking token usage to calculate cost is an important part of putting your app in production. This guide goes over how to obtain this information from your LangChain model calls."
|
||||
"Tracking token usage to calculate cost is an important part of putting your app in production. This guide goes over how to obtain this information from your LangChain model calls.\n",
|
||||
"\n",
|
||||
"This guide requires `langchain-openai >= 0.1.8`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9c7d1338-dd1b-4d06-b33d-d5cffc49fd6a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain langchain-openai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1a55e87a-3291-4e7f-8e8e-4c69b0854384",
|
||||
"id": "598ae1e2-a52d-4459-81fd-cdc68b06742a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using AIMessage.response_metadata\n",
|
||||
"## Using LangSmith\n",
|
||||
"\n",
|
||||
"A number of model providers return token usage information as part of the chat generation response. When available, this is included in the [`AIMessage.response_metadata`](/docs/how_to/response_metadata) field. Here's an example with OpenAI:"
|
||||
"You can use [LangSmith](https://www.langchain.com/langsmith) to help track token usage in your LLM application. See the [LangSmith quick start guide](https://docs.smith.langchain.com/).\n",
|
||||
"\n",
|
||||
"## Using AIMessage.usage_metadata\n",
|
||||
"\n",
|
||||
"A number of model providers return token usage information as part of the chat generation response. When available, this information will be included on the `AIMessage` objects produced by the corresponding model.\n",
|
||||
"\n",
|
||||
"LangChain `AIMessage` objects include a [usage_metadata](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html#langchain_core.messages.ai.AIMessage.usage_metadata) attribute. When populated, this attribute will be a [UsageMetadata](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.UsageMetadata.html) dictionary with standard keys (e.g., `\"input_tokens\"` and `\"output_tokens\"`).\n",
|
||||
"\n",
|
||||
"Examples:\n",
|
||||
"\n",
|
||||
"**OpenAI**:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "467ccdeb-6b62-45e5-816e-167cd24d2586",
|
||||
"id": "b39bf807-4125-4db4-bbf7-28a46afff6b4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'token_usage': {'completion_tokens': 225,\n",
|
||||
" 'prompt_tokens': 17,\n",
|
||||
" 'total_tokens': 242},\n",
|
||||
" 'model_name': 'gpt-4-turbo',\n",
|
||||
" 'system_fingerprint': 'fp_76f018034d',\n",
|
||||
" 'finish_reason': 'stop',\n",
|
||||
" 'logprobs': None}"
|
||||
"{'input_tokens': 8, 'output_tokens': 9, 'total_tokens': 17}"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
@@ -51,37 +67,33 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# !pip install -qU langchain-openai\n",
|
||||
"# # !pip install -qU langchain-openai\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-4-turbo\")\n",
|
||||
"msg = llm.invoke([(\"human\", \"What's the oldest known example of cuneiform\")])\n",
|
||||
"msg.response_metadata"
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\")\n",
|
||||
"openai_response = llm.invoke(\"hello\")\n",
|
||||
"openai_response.usage_metadata"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9d5026e9-3ad4-41e6-9946-9f1a26f4a21f",
|
||||
"id": "2299c44a-2fe6-4d52-a6a2-99ff6d231c73",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And here's an example with Anthropic:"
|
||||
"**Anthropic**:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "145404f1-e088-4824-b468-236c486a9903",
|
||||
"id": "9c82ff80-ec4e-4049-b019-5f0bbd7df82a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'id': 'msg_01P61rdHbapEo6h3fjpfpCQT',\n",
|
||||
" 'model': 'claude-3-sonnet-20240229',\n",
|
||||
" 'stop_reason': 'end_turn',\n",
|
||||
" 'stop_sequence': None,\n",
|
||||
" 'usage': {'input_tokens': 17, 'output_tokens': 306}}"
|
||||
"{'input_tokens': 8, 'output_tokens': 12, 'total_tokens': 20}"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
@@ -94,9 +106,222 @@
|
||||
"\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"\n",
|
||||
"llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\")\n",
|
||||
"msg = llm.invoke([(\"human\", \"What's the oldest known example of cuneiform\")])\n",
|
||||
"msg.response_metadata"
|
||||
"llm = ChatAnthropic(model=\"claude-3-haiku-20240307\")\n",
|
||||
"anthropic_response = llm.invoke(\"hello\")\n",
|
||||
"anthropic_response.usage_metadata"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6d4efc15-ba9f-4b3d-9278-8e01f99f263f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Using AIMessage.response_metadata\n",
|
||||
"\n",
|
||||
"Metadata from the model response is also included in the AIMessage [response_metadata](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html#langchain_core.messages.ai.AIMessage.response_metadata) attribute. These data are typically not standardized. Note that different providers adopt different conventions for representing token counts:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "f156f9da-21f2-4c81-a714-54cbf9ad393e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"OpenAI: {'completion_tokens': 9, 'prompt_tokens': 8, 'total_tokens': 17}\n",
|
||||
"\n",
|
||||
"Anthropic: {'input_tokens': 8, 'output_tokens': 12}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(f'OpenAI: {openai_response.response_metadata[\"token_usage\"]}\\n')\n",
|
||||
"print(f'Anthropic: {anthropic_response.response_metadata[\"usage\"]}')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b4ef2c43-0ff6-49eb-9782-e4070c9da8d7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Streaming\n",
|
||||
"\n",
|
||||
"Some providers support token count metadata in a streaming context.\n",
|
||||
"\n",
|
||||
"#### OpenAI\n",
|
||||
"\n",
|
||||
"For example, OpenAI will return a message [chunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html) at the end of a stream with token usage information. This behavior is supported by `langchain-openai >= 0.1.8` and can be enabled by setting `stream_options={\"include_usage\": True}`.\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
":::note\n",
|
||||
"By default, the last message chunk in a stream will include a `\"finish_reason\"` in the message's `response_metadata` attribute. If we include token usage in streaming mode, an additional chunk containing usage metadata will be added to the end of the stream, such that `\"finish_reason\"` appears on the second to last message chunk.\n",
|
||||
":::\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "07f0c872-6b6c-4fed-a129-9b5a858505be",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"content='' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
|
||||
"content='Hello' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
|
||||
"content='!' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
|
||||
"content=' How' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
|
||||
"content=' can' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
|
||||
"content=' I' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
|
||||
"content=' assist' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
|
||||
"content=' you' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
|
||||
"content=' today' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
|
||||
"content='?' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
|
||||
"content='' response_metadata={'finish_reason': 'stop'} id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
|
||||
"content='' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf' usage_metadata={'input_tokens': 8, 'output_tokens': 9, 'total_tokens': 17}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\")\n",
|
||||
"\n",
|
||||
"aggregate = None\n",
|
||||
"for chunk in llm.stream(\"hello\", stream_options={\"include_usage\": True}):\n",
|
||||
" print(chunk)\n",
|
||||
" aggregate = chunk if aggregate is None else aggregate + chunk"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "dd809ded-8b13-4d5f-be5e-277b79d51802",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that the usage metadata will be included in the sum of the individual message chunks:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "3db7bc03-a7d4-4704-92ab-f8ba92ef59ae",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Hello! How can I assist you today?\n",
|
||||
"{'input_tokens': 8, 'output_tokens': 9, 'total_tokens': 17}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(aggregate.content)\n",
|
||||
"print(aggregate.usage_metadata)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7dba63e8-0ed7-4533-8f0f-78e19c38a25c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To disable streaming token counts for OpenAI, set `\"include_usage\"` to False in `stream_options`, or omit it from the parameters:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "67117f2b-ce68-4c1e-9556-2d3849f90e1b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"content='' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
|
||||
"content='Hello' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
|
||||
"content='!' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
|
||||
"content=' How' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
|
||||
"content=' can' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
|
||||
"content=' I' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
|
||||
"content=' assist' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
|
||||
"content=' you' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
|
||||
"content=' today' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
|
||||
"content='?' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
|
||||
"content='' response_metadata={'finish_reason': 'stop'} id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"aggregate = None\n",
|
||||
"for chunk in llm.stream(\"hello\"):\n",
|
||||
" print(chunk)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6a5d9617-be3a-419a-9276-de9c29fa50ae",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can also enable streaming token usage by setting `model_kwargs` when instantiating the chat model. This can be useful when incorporating chat models into LangChain [chains](/docs/concepts#langchain-expression-language-lcel): usage metadata can be monitored when [streaming intermediate steps](/docs/how_to/streaming#using-stream-events) or using tracing software such as [LangSmith](https://docs.smith.langchain.com/).\n",
|
||||
"\n",
|
||||
"See the below example, where we return output structured to a desired schema, but can still observe token usage streamed from intermediate steps."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "57dec1fb-bd9c-4c98-8798-8fbbe67f6b2c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Token usage: {'input_tokens': 79, 'output_tokens': 23, 'total_tokens': 102}\n",
|
||||
"\n",
|
||||
"setup='Why was the math book sad?' punchline='Because it had too many problems.'\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class Joke(BaseModel):\n",
|
||||
" \"\"\"Joke to tell user.\"\"\"\n",
|
||||
"\n",
|
||||
" setup: str = Field(description=\"question to set up a joke\")\n",
|
||||
" punchline: str = Field(description=\"answer to resolve the joke\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(\n",
|
||||
" model=\"gpt-3.5-turbo-0125\",\n",
|
||||
" model_kwargs={\"stream_options\": {\"include_usage\": True}},\n",
|
||||
")\n",
|
||||
"# Under the hood, .with_structured_output binds tools to the\n",
|
||||
"# chat model and appends a parser.\n",
|
||||
"structured_llm = llm.with_structured_output(Joke)\n",
|
||||
"\n",
|
||||
"async for event in structured_llm.astream_events(\"Tell me a joke\", version=\"v2\"):\n",
|
||||
" if event[\"event\"] == \"on_chat_model_end\":\n",
|
||||
" print(f'Token usage: {event[\"data\"][\"output\"].usage_metadata}\\n')\n",
|
||||
" elif event[\"event\"] == \"on_chain_end\":\n",
|
||||
" print(event[\"data\"][\"output\"])\n",
|
||||
" else:\n",
|
||||
" pass"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2bc8d313-4bef-463e-89a5-236d8bb6ab2f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Token usage is also visible in the corresponding [LangSmith trace](https://smith.langchain.com/public/fe6513d5-7212-4045-82e0-fefa28bc7656/r) in the payload from the chat model."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -115,7 +340,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 9,
|
||||
"id": "31667d54",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -123,11 +348,11 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Tokens Used: 26\n",
|
||||
"Tokens Used: 27\n",
|
||||
"\tPrompt Tokens: 11\n",
|
||||
"\tCompletion Tokens: 15\n",
|
||||
"\tCompletion Tokens: 16\n",
|
||||
"Successful Requests: 1\n",
|
||||
"Total Cost (USD): $0.00056\n"
|
||||
"Total Cost (USD): $2.95e-05\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -136,7 +361,7 @@
|
||||
"\n",
|
||||
"from langchain_community.callbacks.manager import get_openai_callback\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-4-turbo\", temperature=0)\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
|
||||
"\n",
|
||||
"with get_openai_callback() as cb:\n",
|
||||
" result = llm.invoke(\"Tell me a joke\")\n",
|
||||
@@ -153,7 +378,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 10,
|
||||
"id": "e09420f4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -161,7 +386,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"52\n"
|
||||
"55\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -172,6 +397,39 @@
|
||||
" print(cb.total_tokens)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9ac51188-c8f4-4230-90fd-3cd78cdd955d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```{=mdx}\n",
|
||||
":::note\n",
|
||||
"Cost information is currently not available in streaming mode. This is because model names are currently not propagated through chunks in streaming mode, and the model name is used to look up the correct pricing. Token counts however are available:\n",
|
||||
":::\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "b241069a-265d-4497-af34-b0a5f95ae67f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"28\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"with get_openai_callback() as cb:\n",
|
||||
" for chunk in llm.stream(\"Tell me a joke\", stream_options={\"include_usage\": True}):\n",
|
||||
" pass\n",
|
||||
" print(cb.total_tokens)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d8186e7b",
|
||||
@@ -182,7 +440,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"execution_count": 12,
|
||||
"id": "5d1125c6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -211,15 +469,15 @@
|
||||
"source": [
|
||||
"```{=mdx}\n",
|
||||
":::note\n",
|
||||
"We have to set `stream_runnable=False` for token counting to work. By default the AgentExecutor will stream the underlying agent so that you can get the most granular results when streaming events via AgentExecutor.stream_events. However, OpenAI does not return token counts when streaming model responses, so we need to turn off the underlying streaming.\n",
|
||||
"We have to set `stream_runnable=False` for cost information, as described above. By default the AgentExecutor will stream the underlying agent so that you can get the most granular results when streaming events via AgentExecutor.stream_events.\n",
|
||||
":::\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "2f98c536",
|
||||
"execution_count": 13,
|
||||
"id": "3950d88b-8bfb-4294-b75b-e6fd421e633c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -230,46 +488,51 @@
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m\n",
|
||||
"Invoking: `wikipedia` with `Hummingbird`\n",
|
||||
"Invoking: `wikipedia` with `{'query': 'hummingbird scientific name'}`\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3mPage: Hummingbird\n",
|
||||
"Summary: Hummingbirds are birds native to the Americas and comprise the biological family Trochilidae. With approximately 366 species and 113 genera, they occur from Alaska to Tierra del Fuego, but most species are found in Central and South America. As of 2024, 21 hummingbird species are listed as endangered or critically endangered, with numerous species declining in population.Hummingbirds have varied specialized characteristics to enable rapid, maneuverable flight: exceptional metabolic capacity, adaptations to high altitude, sensitive visual and communication abilities, and long-distance migration in some species. Among all birds, male hummingbirds have the widest diversity of plumage color, particularly in blues, greens, and purples. Hummingbirds are the smallest mature birds, measuring 7.5–13 cm (3–5 in) in length. The smallest is the 5 cm (2.0 in) bee hummingbird, which weighs less than 2.0 g (0.07 oz), and the largest is the 23 cm (9 in) giant hummingbird, weighing 18–24 grams (0.63–0.85 oz). Noted for long beaks, hummingbirds are specialized for feeding on flower nectar, but all species also consume small insects.\n",
|
||||
"Summary: Hummingbirds are birds native to the Americas and comprise the biological family Trochilidae. With approximately 366 species and 113 genera, they occur from Alaska to Tierra del Fuego, but most species are found in Central and South America. As of 2024, 21 hummingbird species are listed as endangered or critically endangered, with numerous species declining in population.\n",
|
||||
"Hummingbirds have varied specialized characteristics to enable rapid, maneuverable flight: exceptional metabolic capacity, adaptations to high altitude, sensitive visual and communication abilities, and long-distance migration in some species. Among all birds, male hummingbirds have the widest diversity of plumage color, particularly in blues, greens, and purples. Hummingbirds are the smallest mature birds, measuring 7.5–13 cm (3–5 in) in length. The smallest is the 5 cm (2.0 in) bee hummingbird, which weighs less than 2.0 g (0.07 oz), and the largest is the 23 cm (9 in) giant hummingbird, weighing 18–24 grams (0.63–0.85 oz). Noted for long beaks, hummingbirds are specialized for feeding on flower nectar, but all species also consume small insects.\n",
|
||||
"They are known as hummingbirds because of the humming sound created by their beating wings, which flap at high frequencies audible to other birds and humans. They hover at rapid wing-flapping rates, which vary from around 12 beats per second in the largest species to 80 per second in small hummingbirds.\n",
|
||||
"Hummingbirds have the highest mass-specific metabolic rate of any homeothermic animal. To conserve energy when food is scarce and at night when not foraging, they can enter torpor, a state similar to hibernation, and slow their metabolic rate to 1⁄15 of its normal rate. While most hummingbirds do not migrate, the rufous hummingbird has one of the longest migrations among birds, traveling twice per year between Alaska and Mexico, a distance of about 3,900 miles (6,300 km).\n",
|
||||
"Hummingbirds split from their sister group, the swifts and treeswifts, around 42 million years ago. The oldest known fossil hummingbird is Eurotrochilus, from the Rupelian Stage of Early Oligocene Europe.\n",
|
||||
"\n",
|
||||
"Page: Rufous hummingbird\n",
|
||||
"Summary: The rufous hummingbird (Selasphorus rufus) is a small hummingbird, about 8 cm (3.1 in) long with a long, straight and slender bill. These birds are known for their extraordinary flight skills, flying 2,000 mi (3,200 km) during their migratory transits. It is one of nine species in the genus Selasphorus.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Page: Bee hummingbird\n",
|
||||
"Summary: The bee hummingbird, zunzuncito or Helena hummingbird (Mellisuga helenae) is a species of hummingbird, native to the island of Cuba in the Caribbean. It is the smallest known bird. The bee hummingbird feeds on nectar of flowers and bugs found in Cuba.\n",
|
||||
"\n",
|
||||
"Page: Hummingbird cake\n",
|
||||
"Summary: Hummingbird cake is a banana-pineapple spice cake originating in Jamaica and a popular dessert in the southern United States since the 1970s. Ingredients include flour, sugar, salt, vegetable oil, ripe banana, pineapple, cinnamon, pecans, vanilla extract, eggs, and leavening agent. It is often served with cream cheese frosting.\u001b[0m\u001b[32;1m\u001b[1;3m\n",
|
||||
"Invoking: `wikipedia` with `Fastest bird`\n",
|
||||
"Page: Anna's hummingbird\n",
|
||||
"Summary: Anna's hummingbird (Calypte anna) is a North American species of hummingbird. It was named after Anna Masséna, Duchess of Rivoli.\n",
|
||||
"It is native to western coastal regions of North America. In the early 20th century, Anna's hummingbirds bred only in northern Baja California and Southern California. The transplanting of exotic ornamental plants in residential areas throughout the Pacific coast and inland deserts provided expanded nectar and nesting sites, allowing the species to expand its breeding range. Year-round residence of Anna's hummingbirds in the Pacific Northwest is an example of ecological release dependent on acclimation to colder winter temperatures, introduced plants, and human provision of nectar feeders during winter.\n",
|
||||
"These birds feed on nectar from flowers using a long extendable tongue. They also consume small insects and other arthropods caught in flight or gleaned from vegetation.\u001b[0m\u001b[32;1m\u001b[1;3m\n",
|
||||
"Invoking: `wikipedia` with `{'query': 'fastest bird species'}`\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3mPage: Fastest animals\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3mPage: List of birds by flight speed\n",
|
||||
"Summary: This is a list of the fastest flying birds in the world. A bird's velocity is necessarily variable; a hunting bird will reach much greater speeds while diving to catch prey than when flying horizontally. The bird that can achieve the greatest airspeed is the peregrine falcon (Falco peregrinus), able to exceed 320 km/h (200 mph) in its dives. A close relative of the common swift, the white-throated needletail (Hirundapus caudacutus), is commonly reported as the fastest bird in level flight with a reported top speed of 169 km/h (105 mph). This record remains unconfirmed as the measurement methods have never been published or verified. The record for the fastest confirmed level flight by a bird is 111.5 km/h (69.3 mph) held by the common swift.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Page: Fastest animals\n",
|
||||
"Summary: This is a list of the fastest animals in the world, by types of animal.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Page: List of birds by flight speed\n",
|
||||
"Summary: This is a list of the fastest flying birds in the world. A bird's velocity is necessarily variable; a hunting bird will reach much greater speeds while diving to catch prey than when flying horizontally. The bird that can achieve the greatest airspeed is the peregrine falcon, able to exceed 320 km/h (200 mph) in its dives. A close relative of the common swift, the white-throated needletail (Hirundapus caudacutus), is commonly reported as the fastest bird in level flight with a reported top speed of 169 km/h (105 mph). This record remains unconfirmed as the measurement methods have never been published or verified. The record for the fastest confirmed level flight by a bird is 111.5 km/h (69.3 mph) held by the common swift.\n",
|
||||
"\n",
|
||||
"Page: Ostrich\n",
|
||||
"Summary: Ostriches are large flightless birds. They are the heaviest and largest living birds, with adult common ostriches weighing anywhere between 63.5 and 145 kilograms and laying the largest eggs of any living land animal. With the ability to run at 70 km/h (43.5 mph), they are the fastest birds on land. They are farmed worldwide, with significant industries in the Philippines and in Namibia. Ostrich leather is a lucrative commodity, and the large feathers are used as plumes for the decoration of ceremonial headgear. Ostrich eggs have been used by humans for millennia.\n",
|
||||
"Ostriches are of the genus Struthio in the order Struthioniformes, part of the infra-class Palaeognathae, a diverse group of flightless birds also known as ratites that includes the emus, rheas, cassowaries, kiwis and the extinct elephant birds and moas. There are two living species of ostrich: the common ostrich, native to large areas of sub-Saharan Africa, and the Somali ostrich, native to the Horn of Africa. The common ostrich was historically native to the Arabian Peninsula, and ostriches were present across Asia as far east as China and Mongolia during the Late Pleistocene and possibly into the Holocene.\u001b[0m\u001b[32;1m\u001b[1;3m### Hummingbird's Scientific Name\n",
|
||||
"The scientific name for the bee hummingbird, which is the smallest known bird and a species of hummingbird, is **Mellisuga helenae**. It is native to Cuba.\n",
|
||||
"\n",
|
||||
"### Fastest Bird Species\n",
|
||||
"The fastest bird in terms of airspeed is the **peregrine falcon**, which can exceed speeds of 320 km/h (200 mph) during its diving flight. In level flight, the fastest confirmed speed is held by the **common swift**, which can fly at 111.5 km/h (69.3 mph).\u001b[0m\n",
|
||||
"Page: Falcon\n",
|
||||
"Summary: Falcons () are birds of prey in the genus Falco, which includes about 40 species. Falcons are widely distributed on all continents of the world except Antarctica, though closely related raptors did occur there in the Eocene.\n",
|
||||
"Adult falcons have thin, tapered wings, which enable them to fly at high speed and change direction rapidly. Fledgling falcons, in their first year of flying, have longer flight feathers, which make their configuration more like that of a general-purpose bird such as a broad wing. This makes flying easier while learning the exceptional skills required to be effective hunters as adults.\n",
|
||||
"The falcons are the largest genus in the Falconinae subfamily of Falconidae, which itself also includes another subfamily comprising caracaras and a few other species. All these birds kill with their beaks, using a tomial \"tooth\" on the side of their beaks—unlike the hawks, eagles, and other birds of prey in the Accipitridae, which use their feet.\n",
|
||||
"The largest falcon is the gyrfalcon at up to 65 cm in length. The smallest falcon species is the pygmy falcon, which measures just 20 cm. As with hawks and owls, falcons exhibit sexual dimorphism, with the females typically larger than the males, thus allowing a wider range of prey species.\n",
|
||||
"Some small falcons with long, narrow wings are called \"hobbies\" and some which hover while hunting are called \"kestrels\".\n",
|
||||
"As is the case with many birds of prey, falcons have exceptional powers of vision; the visual acuity of one species has been measured at 2.6 times that of a normal human. Peregrine falcons have been recorded diving at speeds of 320 km/h (200 mph), making them the fastest-moving creatures on Earth; the fastest recorded dive attained a vertical speed of 390 km/h (240 mph).\u001b[0m\u001b[32;1m\u001b[1;3mThe scientific name for a hummingbird is Trochilidae. The fastest bird species is the peregrine falcon (Falco peregrinus), which can exceed speeds of 320 km/h (200 mph) in its dives.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"Total Tokens: 1583\n",
|
||||
"Prompt Tokens: 1412\n",
|
||||
"Completion Tokens: 171\n",
|
||||
"Total Cost (USD): $0.019250000000000003\n"
|
||||
"Total Tokens: 1787\n",
|
||||
"Prompt Tokens: 1687\n",
|
||||
"Completion Tokens: 100\n",
|
||||
"Total Cost (USD): $0.0009935\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -298,19 +561,19 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "4a3eced5-2ff7-49a7-a48b-768af8658323",
|
||||
"execution_count": 12,
|
||||
"id": "1837c807-136a-49d8-9c33-060e58dc16d2",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Tokens Used: 0\n",
|
||||
"\tPrompt Tokens: 0\n",
|
||||
"\tCompletion Tokens: 0\n",
|
||||
"Tokens Used: 96\n",
|
||||
"\tPrompt Tokens: 26\n",
|
||||
"\tCompletion Tokens: 70\n",
|
||||
"Successful Requests: 2\n",
|
||||
"Total Cost (USD): $0.0\n"
|
||||
"Total Cost (USD): $0.001888\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -364,7 +627,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -1,15 +1,5 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "77bf57fb-e990-45f2-8b5f-c76388b05966",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"keywords: [LCEL]\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "50d57bf2-7104-4570-b3e5-90fd71e1bea1",
|
||||
|
||||
@@ -4,13 +4,17 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to create an Ensemble Retriever\n",
|
||||
"# How to combine results from multiple retrievers\n",
|
||||
"\n",
|
||||
"The `EnsembleRetriever` takes a list of retrievers as input and ensemble the results of their `get_relevant_documents()` methods and rerank the results based on the [Reciprocal Rank Fusion](https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf) algorithm.\n",
|
||||
"The [EnsembleRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.ensemble.EnsembleRetriever.html) supports ensembling of results from multiple retrievers. It is initialized with a list of [BaseRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain_core.retrievers.BaseRetriever.html) objects. EnsembleRetrievers rerank the results of the constituent retrievers based on the [Reciprocal Rank Fusion](https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf) algorithm.\n",
|
||||
"\n",
|
||||
"By leveraging the strengths of different algorithms, the `EnsembleRetriever` can achieve better performance than any single algorithm. \n",
|
||||
"\n",
|
||||
"The most common pattern is to combine a sparse retriever (like BM25) with a dense retriever (like embedding similarity), because their strengths are complementary. It is also known as \"hybrid search\". The sparse retriever is good at finding relevant documents based on keywords, while the dense retriever is good at finding relevant documents based on semantic similarity."
|
||||
"The most common pattern is to combine a sparse retriever (like BM25) with a dense retriever (like embedding similarity), because their strengths are complementary. It is also known as \"hybrid search\". The sparse retriever is good at finding relevant documents based on keywords, while the dense retriever is good at finding relevant documents based on semantic similarity.\n",
|
||||
"\n",
|
||||
"## Basic usage\n",
|
||||
"\n",
|
||||
"Below we demonstrate ensembling of a [BM25Retriever](https://api.python.langchain.com/en/latest/retrievers/langchain_community.retrievers.bm25.BM25Retriever.html) with a retriever derived from the [FAISS vector store](https://api.python.langchain.com/en/latest/vectorstores/langchain_community.vectorstores.faiss.FAISS.html)."
|
||||
]
|
||||
},
|
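The Reciprocal Rank Fusion step mentioned above is easy to see in isolation. The sketch below is illustrative only (these function names are not LangChain APIs) and assumes the conventional `k=60` constant from the RRF paper:

```python
# Hypothetical sketch of Reciprocal Rank Fusion (RRF), the scheme the
# EnsembleRetriever uses to merge rankings from its constituent retrievers.
from collections import defaultdict


def reciprocal_rank_fusion(ranked_lists, weights=None, k=60):
    """Fuse several ranked lists of document ids into one ranking.

    Each document gains weight * 1 / (k + rank) from every list it appears
    in; documents ranked highly by several retrievers float to the top.
    """
    weights = weights or [1.0] * len(ranked_lists)
    scores = defaultdict(float)
    for weight, ranking in zip(weights, ranked_lists):
        for rank, doc_id in enumerate(ranking, start=1):
            scores[doc_id] += weight / (k + rank)
    return sorted(scores, key=scores.get, reverse=True)


# A sparse (keyword) ranking and a dense (embedding) ranking of the same corpus:
print(reciprocal_rank_fusion([["d1", "d2", "d3"], ["d2", "d4", "d1"]]))
# -> ['d2', 'd1', 'd4', 'd3']; d1 and d2 benefit from appearing in both lists.
```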
||||
{
|
||||
@@ -24,22 +28,15 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.retrievers import EnsembleRetriever\n",
|
||||
"from langchain_community.retrievers import BM25Retriever\n",
|
||||
"from langchain_community.vectorstores import FAISS\n",
|
||||
"from langchain_openai import OpenAIEmbeddings"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"doc_list_1 = [\n",
|
||||
" \"I like apples\",\n",
|
||||
" \"I like oranges\",\n",
|
||||
@@ -71,19 +68,19 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='You like apples', metadata={'source': 2}),\n",
|
||||
" Document(page_content='I like apples', metadata={'source': 1}),\n",
|
||||
" Document(page_content='You like oranges', metadata={'source': 2}),\n",
|
||||
" Document(page_content='Apples and oranges are fruits', metadata={'source': 1})]"
|
||||
"[Document(page_content='I like apples', metadata={'source': 1}),\n",
|
||||
" Document(page_content='You like apples', metadata={'source': 2}),\n",
|
||||
" Document(page_content='Apples and oranges are fruits', metadata={'source': 1}),\n",
|
||||
" Document(page_content='You like oranges', metadata={'source': 2})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 15,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -99,24 +96,17 @@
|
||||
"source": [
|
||||
"## Runtime Configuration\n",
|
||||
"\n",
|
||||
"We can also configure the retrievers at runtime. In order to do this, we need to mark the fields as configurable"
|
||||
"We can also configure the individual retrievers at runtime using [configurable fields](/docs/how_to/configure). Below we update the \"top-k\" parameter for the FAISS retriever specifically:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.runnables import ConfigurableField"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.runnables import ConfigurableField\n",
|
||||
"\n",
|
||||
"faiss_retriever = faiss_vectorstore.as_retriever(\n",
|
||||
" search_kwargs={\"k\": 2}\n",
|
||||
").configurable_fields(\n",
|
||||
@@ -125,15 +115,8 @@
|
||||
" name=\"Search Kwargs\",\n",
|
||||
" description=\"The search kwargs to use\",\n",
|
||||
" )\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
")\n",
|
||||
"\n",
|
||||
"ensemble_retriever = EnsembleRetriever(\n",
|
||||
" retrievers=[bm25_retriever, faiss_retriever], weights=[0.5, 0.5]\n",
|
||||
")"
|
||||
@@ -141,9 +124,22 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='I like apples', metadata={'source': 1}),\n",
|
||||
" Document(page_content='You like apples', metadata={'source': 2}),\n",
|
||||
" Document(page_content='Apples and oranges are fruits', metadata={'source': 1})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"config = {\"configurable\": {\"search_kwargs_faiss\": {\"k\": 1}}}\n",
|
||||
"docs = ensemble_retriever.invoke(\"apples\", config=config)\n",
|
||||
@@ -181,7 +177,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -60,7 +60,7 @@
|
||||
"source": [
|
||||
"examples = [\n",
|
||||
" {\"input\": \"hi\", \"output\": \"ciao\"},\n",
|
||||
" {\"input\": \"bye\", \"output\": \"arrivaderci\"},\n",
|
||||
" {\"input\": \"bye\", \"output\": \"arrivederci\"},\n",
|
||||
" {\"input\": \"soccer\", \"output\": \"calcio\"},\n",
|
||||
"]"
|
||||
]
|
||||
@@ -133,7 +133,7 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'input': 'bye', 'output': 'arrivaderci'}]"
|
||||
"[{'input': 'bye', 'output': 'arrivederci'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 39,
|
||||
@@ -209,7 +209,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Translate the following words from English to Italain:\n",
|
||||
"Translate the following words from English to Italian:\n",
|
||||
"\n",
|
||||
"Input: hand -> Output: mano\n",
|
||||
"\n",
|
||||
@@ -222,7 +222,7 @@
|
||||
" example_selector=example_selector,\n",
|
||||
" example_prompt=example_prompt,\n",
|
||||
" suffix=\"Input: {input} -> Output:\",\n",
|
||||
" prefix=\"Translate the following words from English to Italain:\",\n",
|
||||
" prefix=\"Translate the following words from English to Italian:\",\n",
|
||||
" input_variables=[\"input\"],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
|
||||
@@ -128,7 +128,7 @@
|
||||
" # Having a good description can help improve extraction results.\n",
|
||||
" name: Optional[str] = Field(..., description=\"The name of the person\")\n",
|
||||
" hair_color: Optional[str] = Field(\n",
|
||||
" ..., description=\"The color of the peron's eyes if known\"\n",
|
||||
" ..., description=\"The color of the person's hair if known\"\n",
|
||||
" )\n",
|
||||
" height_in_meters: Optional[str] = Field(..., description=\"Height in METERs\")\n",
|
||||
"\n",
|
||||
|
||||
@@ -49,7 +49,7 @@ These are the core building blocks you can use when building applications.
|
||||
|
||||
### Prompt templates
|
||||
|
||||
Prompt Templates are responsible for formatting user input into a format that can be passed to a language model.
|
||||
[Prompt Templates](/docs/concepts/#prompt-templates) are responsible for formatting user input into a format that can be passed to a language model.
|
||||
|
||||
- [How to: use few shot examples](/docs/how_to/few_shot_examples)
|
||||
- [How to: use few shot examples in chat models](/docs/how_to/few_shot_examples_chat/)
|
||||
@@ -58,7 +58,7 @@ Prompt Templates are responsible for formatting user input into a format that ca
|
||||
|
||||
### Example selectors
|
||||
|
||||
Example Selectors are responsible for selecting the correct few shot examples to pass to the prompt.
|
||||
[Example Selectors](/docs/concepts/#example-selectors) are responsible for selecting the correct few shot examples to pass to the prompt.
|
||||
|
||||
- [How to: use example selectors](/docs/how_to/example_selectors)
|
||||
- [How to: select examples by length](/docs/how_to/example_selectors_length_based)
|
||||
@@ -68,7 +68,7 @@ Example Selectors are responsible for selecting the correct few shot examples to
|
||||
|
||||
### Chat models
|
||||
|
||||
Chat Models are newer forms of language models that take messages in and output a message.
|
||||
[Chat Models](/docs/concepts/#chat-models) are newer forms of language models that take messages in and output a message.
|
||||
|
||||
- [How to: do function/tool calling](/docs/how_to/tool_calling)
|
||||
- [How to: get models to return structured output](/docs/how_to/structured_output)
|
||||
@@ -78,10 +78,11 @@ Chat Models are newer forms of language models that take messages in and output
|
||||
- [How to: stream a response back](/docs/how_to/chat_streaming)
|
||||
- [How to: track token usage](/docs/how_to/chat_token_usage_tracking)
|
||||
- [How to: track response metadata across providers](/docs/how_to/response_metadata)
|
||||
- [How to: let your end users choose their model](/docs/how_to/chat_models_universal_init/)
|
||||
|
||||
### LLMs
|
||||
|
||||
What LangChain calls LLMs are older forms of language models that take a string in and output a string.
|
||||
What LangChain calls [LLMs](/docs/concepts/#llms) are older forms of language models that take a string in and output a string.
|
||||
|
||||
- [How to: cache model responses](/docs/how_to/llm_caching)
|
||||
- [How to: create a custom LLM class](/docs/how_to/custom_llm)
|
||||
@@ -91,7 +92,7 @@ What LangChain calls LLMs are older forms of language models that take a string
|
||||
|
||||
### Output parsers
|
||||
|
||||
Output Parsers are responsible for taking the output of an LLM and parsing into more structured format.
|
||||
[Output Parsers](/docs/concepts/#output-parsers) are responsible for taking the output of an LLM and parsing into more structured format.
|
||||
|
||||
- [How to: use output parsers to parse an LLM response into structured format](/docs/how_to/output_parser_structured)
|
||||
- [How to: parse JSON output](/docs/how_to/output_parser_json)
|
||||
@@ -103,7 +104,7 @@ Output Parsers are responsible for taking the output of an LLM and parsing into
|
||||
|
||||
### Document loaders
|
||||
|
||||
Document Loaders are responsible for loading documents from a variety of sources.
|
||||
[Document Loaders](/docs/concepts/#document-loaders) are responsible for loading documents from a variety of sources.
|
||||
|
||||
- [How to: load CSV data](/docs/how_to/document_loader_csv)
|
||||
- [How to: load data from a directory](/docs/how_to/document_loader_directory)
|
||||
@@ -116,7 +117,7 @@ Document Loaders are responsible for loading documents from a variety of sources
|
||||
|
||||
### Text splitters
|
||||
|
||||
Text Splitters take a document and split into chunks that can be used for retrieval.
|
||||
[Text Splitters](/docs/concepts/#text-splitters) take a document and split into chunks that can be used for retrieval.
|
||||
|
||||
- [How to: recursively split text](/docs/how_to/recursive_text_splitter)
|
||||
- [How to: split by HTML headers](/docs/how_to/HTML_header_metadata_splitter)
|
||||
@@ -130,20 +131,20 @@ Text Splitters take a document and split into chunks that can be used for retrie
|
||||
|
||||
### Embedding models
|
||||
|
||||
Embedding Models take a piece of text and create a numerical representation of it.
|
||||
[Embedding Models](/docs/concepts/#embedding-models) take a piece of text and create a numerical representation of it.
|
||||
|
||||
- [How to: embed text data](/docs/how_to/embed_text)
|
||||
- [How to: cache embedding results](/docs/how_to/caching_embeddings)
|
||||
|
||||
### Vector stores
|
||||
|
||||
Vector stores are databases that can efficiently store and retrieve embeddings.
|
||||
[Vector stores](/docs/concepts/#vector-stores) are databases that can efficiently store and retrieve embeddings.
|
||||
|
||||
- [How to: use a vector store to retrieve data](/docs/how_to/vectorstores)
|
||||
|
||||
### Retrievers
|
||||
|
||||
Retrievers are responsible for taking a query and returning relevant documents.
|
||||
[Retrievers](/docs/concepts/#retrievers) are responsible for taking a query and returning relevant documents.
|
||||
|
||||
- [How to: use a vector store to retrieve data](/docs/how_to/vectorstore_retriever)
|
||||
- [How to: generate multiple queries to retrieve data for](/docs/how_to/MultiQueryRetriever)
|
||||
@@ -151,7 +152,7 @@ Retrievers are responsible for taking a query and returning relevant documents.
|
||||
- [How to: write a custom retriever class](/docs/how_to/custom_retriever)
|
||||
- [How to: add similarity scores to retriever results](/docs/how_to/add_scores_retriever)
|
||||
- [How to: combine the results from multiple retrievers](/docs/how_to/ensemble_retriever)
|
||||
- [How to: reorder retrieved results to put most relevant documents not in the middle](/docs/how_to/long_context_reorder)
|
||||
- [How to: reorder retrieved results to mitigate the "lost in the middle" effect](/docs/how_to/long_context_reorder)
|
||||
- [How to: generate multiple embeddings per document](/docs/how_to/multi_vector)
|
||||
- [How to: retrieve the whole document for a chunk](/docs/how_to/parent_document_retriever)
|
||||
- [How to: generate metadata filters](/docs/how_to/self_query)
|
||||
@@ -166,12 +167,13 @@ Indexing is the process of keeping your vectorstore in-sync with the underlying
|
||||
|
||||
### Tools
|
||||
|
||||
LangChain Tools contain a description of the tool (to pass to the language model) as well as the implementation of the function to call).
|
||||
LangChain [Tools](/docs/concepts/#tools) contain a description of the tool (to pass to the language model) as well as the implementation of the function to call.
|
||||
|
||||
- [How to: create custom tools](/docs/how_to/custom_tools)
|
||||
- [How to: use built-in tools and built-in toolkits](/docs/how_to/tools_builtin)
|
||||
- [How to: use a chat model to call tools](/docs/how_to/tool_calling/)
|
||||
- [How to: add ad-hoc tool calling capability to LLMs and chat models](/docs/how_to/tools_prompting)
|
||||
- [How to: pass run time values to tools](/docs/how_to/tool_runtime)
|
||||
- [How to: add a human in the loop to tool usage](/docs/how_to/tools_human)
|
||||
- [How to: handle errors when calling tools](/docs/how_to/tools_error)
|
||||
|
||||
@@ -194,6 +196,8 @@ For in depth how-to guides for agents, please check out [LangGraph](https://gith
|
||||
|
||||
### Callbacks
|
||||
|
||||
[Callbacks](/docs/concepts/#callbacks) allow you to hook into the various stages of your LLM application's execution.
|
||||
|
||||
- [How to: pass in callbacks at runtime](/docs/how_to/callbacks_runtime)
|
||||
- [How to: attach callbacks to a module](/docs/how_to/callbacks_attach)
|
||||
- [How to: pass callbacks into a module constructor](/docs/how_to/callbacks_constructor)
|
||||
@@ -220,6 +224,7 @@ These guides cover use-case specific details.
|
||||
### Q&A with RAG
|
||||
|
||||
Retrieval Augmented Generation (RAG) is a way to connect LLMs to external sources of data.
|
||||
For a high-level tutorial on RAG, check out [this guide](/docs/tutorials/rag/).
|
||||
|
||||
- [How to: add chat history](/docs/how_to/qa_chat_history_how_to/)
|
||||
- [How to: stream](/docs/how_to/qa_streaming/)
|
||||
@@ -231,6 +236,7 @@ Retrieval Augmented Generation (RAG) is a way to connect LLMs to external source
|
||||
### Extraction
|
||||
|
||||
Extraction is when you use LLMs to extract structured information from unstructured text.
|
||||
For a high level tutorial on extraction, check out [this guide](/docs/tutorials/extraction/).
|
||||
|
||||
- [How to: use reference examples](/docs/how_to/extraction_examples/)
|
||||
- [How to: handle long text](/docs/how_to/extraction_long_text/)
|
||||
@@ -239,6 +245,7 @@ Extraction is when you use LLMs to extract structured information from unstructu
|
||||
### Chatbots
|
||||
|
||||
Chatbots involve using an LLM to have a conversation.
|
||||
For a high-level tutorial on building chatbots, check out [this guide](/docs/tutorials/chatbot/).
|
||||
|
||||
- [How to: manage memory](/docs/how_to/chatbots_memory)
|
||||
- [How to: do retrieval](/docs/how_to/chatbots_retrieval)
|
||||
@@ -247,6 +254,7 @@ Chatbots involve using an LLM to have a conversation.
|
||||
### Query analysis
|
||||
|
||||
Query Analysis is the task of using an LLM to generate a query to send to a retriever.
|
||||
For a high-level tutorial on query analysis, check out [this guide](/docs/tutorials/query_analysis/).
|
||||
|
||||
- [How to: add examples to the prompt](/docs/how_to/query_few_shot)
|
||||
- [How to: handle cases where no queries are generated](/docs/how_to/query_no_queries)
|
||||
@@ -258,6 +266,7 @@ Query Analysis is the task of using an LLM to generate a query to send to a retr
|
||||
### Q&A over SQL + CSV
|
||||
|
||||
You can use LLMs to do question answering over tabular data.
|
||||
For a high-level tutorial, check out [this guide](/docs/tutorials/sql_qa/).
|
||||
|
||||
- [How to: use prompting to improve results](/docs/how_to/sql_prompting)
|
||||
- [How to: do query validation](/docs/how_to/sql_query_checking)
|
||||
@@ -267,8 +276,25 @@ You can use LLMs to do question answering over tabular data.
|
||||
### Q&A over graph databases
|
||||
|
||||
You can use an LLM to do question answering over graph databases.
|
||||
For a high-level tutorial, check out [this guide](/docs/tutorials/graph/).
|
||||
|
||||
- [How to: map values to a database](/docs/how_to/graph_mapping)
|
||||
- [How to: add a semantic layer over the database](/docs/how_to/graph_semantic)
|
||||
- [How to: improve results with prompting](/docs/how_to/graph_prompting)
|
||||
- [How to: construct knowledge graphs](/docs/how_to/graph_constructing)
|
||||
|
||||
## [LangGraph](https://langchain-ai.github.io/langgraph)
|
||||
|
||||
LangGraph is an extension of LangChain aimed at
|
||||
building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
|
||||
|
||||
LangGraph documentation is currently hosted on a separate site.
|
||||
You can peruse [LangGraph how-to guides here](https://langchain-ai.github.io/langgraph/how-tos/).
|
||||
|
||||
## [LangSmith](https://docs.smith.langchain.com/)
|
||||
|
||||
LangSmith allows you to closely trace, monitor and evaluate your LLM application.
|
||||
It seamlessly integrates with LangChain, and you can use it to inspect and debug individual steps of your chains as you build.
|
||||
|
||||
LangSmith documentation is hosted on a separate site.
|
||||
You can peruse [LangSmith how-to guides here](https://docs.smith.langchain.com/how_to_guides/).
|
||||
|
||||
@@ -2,169 +2,226 @@
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e5715368",
|
||||
"id": "90dff237-bc28-4185-a2c0-d5203bbdeacd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to track token usage for LLMs\n",
|
||||
"\n",
|
||||
"This notebook goes over how to track your token usage for specific calls. It is currently only implemented for the OpenAI API.\n",
|
||||
"Tracking token usage to calculate cost is an important part of putting your app in production. This guide goes over how to obtain this information from your LangChain model calls.\n",
|
||||
"\n",
|
||||
"Let's first look at an extremely simple example of tracking token usage for a single LLM call."
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"\n",
|
||||
"- [LLMs](/docs/concepts/#llms)\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"## Using LangSmith\n",
|
||||
"\n",
|
||||
"You can use [LangSmith](https://www.langchain.com/langsmith) to help track token usage in your LLM application. See the [LangSmith quick start guide](https://docs.smith.langchain.com/).\n",
|
||||
"\n",
|
||||
"## Using callbacks\n",
|
||||
"\n",
|
||||
"There are some API-specific callback context managers that allow you to track token usage across multiple calls. You'll need to check whether such an integration is available for your particular model.\n",
|
||||
"\n",
|
||||
"If such an integration is not available for your model, you can create a custom callback manager by adapting the implementation of the [OpenAI callback manager](https://api.python.langchain.com/en/latest/_modules/langchain_community/callbacks/openai_info.html#OpenAICallbackHandler).\n",
|
||||
"\n",
|
||||
"### OpenAI\n",
|
||||
"\n",
|
||||
"Let's first look at an extremely simple example of tracking token usage for a single Chat model call.\n",
|
||||
"\n",
|
||||
":::{.callout-danger}\n",
|
||||
"\n",
|
||||
"The callback handler does not currently support streaming token counts for legacy language models (e.g., `langchain_openai.OpenAI`). For support in a streaming context, refer to the corresponding guide for chat models [here](/docs/how_to/chat_token_usage_tracking).\n",
|
||||
"\n",
|
||||
":::"
|
||||
]
|
||||
},
|
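If no such integration exists for your provider, a handler along the following lines could serve as a starting point. This is a minimal sketch, not the `OpenAICallbackHandler` itself (which also maps model names to prices); it assumes the provider reports a `token_usage` dict in `llm_output`, as OpenAI does for non-streaming calls:

```python
# Minimal sketch of a custom usage-tracking callback handler (assumptions above).
from langchain_core.callbacks import BaseCallbackHandler


class UsageTrackingHandler(BaseCallbackHandler):
    def __init__(self):
        self.prompt_tokens = 0
        self.completion_tokens = 0
        self.total_tokens = 0

    def on_llm_end(self, response, **kwargs):
        # llm_output is provider-specific; OpenAI populates "token_usage" here.
        usage = (response.llm_output or {}).get("token_usage", {})
        self.prompt_tokens += usage.get("prompt_tokens", 0)
        self.completion_tokens += usage.get("completion_tokens", 0)
        self.total_tokens += usage.get("total_tokens", 0)


# Hypothetical usage: pass the handler via the run config and read the totals.
# handler = UsageTrackingHandler()
# llm.invoke("Tell me a joke", config={"callbacks": [handler]})
# print(handler.total_tokens)
```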
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f790edd9-823e-4bc5-befa-e9529c7237a0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Single call"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "9455db35",
|
||||
"id": "2eebbee2-6ca1-4fa8-a3aa-0376888ceefb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"Why don't scientists trust atoms?\n",
|
||||
"\n",
|
||||
"Because they make up everything.\n",
|
||||
"---\n",
|
||||
"\n",
|
||||
"Total Tokens: 18\n",
|
||||
"Prompt Tokens: 4\n",
|
||||
"Completion Tokens: 14\n",
|
||||
"Total Cost (USD): $3.4e-05\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.callbacks import get_openai_callback\n",
|
||||
"from langchain_openai import OpenAI"
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI(model_name=\"gpt-3.5-turbo-instruct\")\n",
|
||||
"\n",
|
||||
"with get_openai_callback() as cb:\n",
|
||||
" result = llm.invoke(\"Tell me a joke\")\n",
|
||||
" print(result)\n",
|
||||
" print(\"---\")\n",
|
||||
"print()\n",
|
||||
"\n",
|
||||
"print(f\"Total Tokens: {cb.total_tokens}\")\n",
|
||||
"print(f\"Prompt Tokens: {cb.prompt_tokens}\")\n",
|
||||
"print(f\"Completion Tokens: {cb.completion_tokens}\")\n",
|
||||
"print(f\"Total Cost (USD): ${cb.total_cost}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7df3be35-dd97-4e3a-bd51-52434ab2249d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Multiple calls\n",
|
||||
"\n",
|
||||
"Anything inside the context manager will get tracked. Here's an example of using it to track multiple calls in sequence to a chain. This will also work for an agent which may use multiple steps."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "d1c55cc9",
|
||||
"id": "3ec10419-294c-44bf-af85-86aabf457cb6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"Why did the chicken go to the seance?\n",
|
||||
"\n",
|
||||
"To talk to the other side of the road!\n",
|
||||
"--\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Why did the fish need a lawyer?\n",
|
||||
"\n",
|
||||
"Because it got caught in a net!\n",
|
||||
"\n",
|
||||
"---\n",
|
||||
"Total Tokens: 50\n",
|
||||
"Prompt Tokens: 12\n",
|
||||
"Completion Tokens: 38\n",
|
||||
"Total Cost (USD): $9.400000000000001e-05\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm = OpenAI(model_name=\"gpt-3.5-turbo-instruct\", n=2, best_of=2)"
|
||||
"from langchain_community.callbacks import get_openai_callback\n",
|
||||
"from langchain_core.prompts import PromptTemplate\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI(model_name=\"gpt-3.5-turbo-instruct\")\n",
|
||||
"\n",
|
||||
"template = PromptTemplate.from_template(\"Tell me a joke about {topic}\")\n",
|
||||
"chain = template | llm\n",
|
||||
"\n",
|
||||
"with get_openai_callback() as cb:\n",
|
||||
" response = chain.invoke({\"topic\": \"birds\"})\n",
|
||||
" print(response)\n",
|
||||
" response = chain.invoke({\"topic\": \"fish\"})\n",
|
||||
" print(\"--\")\n",
|
||||
" print(response)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"print()\n",
|
||||
"print(\"---\")\n",
|
||||
"print(f\"Total Tokens: {cb.total_tokens}\")\n",
|
||||
"print(f\"Prompt Tokens: {cb.prompt_tokens}\")\n",
|
||||
"print(f\"Completion Tokens: {cb.completion_tokens}\")\n",
|
||||
"print(f\"Total Cost (USD): ${cb.total_cost}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ad7a3fba-9fac-4222-8f87-d1d276d27d6e",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"## Streaming\n",
|
||||
"\n",
|
||||
":::{.callout-danger}\n",
|
||||
"\n",
|
||||
"`get_openai_callback` does not currently support streaming token counts for legacy language models (e.g., `langchain_openai.OpenAI`). If you want to count tokens correctly in a streaming context, there are a number of options:\n",
|
||||
"\n",
|
||||
"- Use chat models as described in [this guide](/docs/how_to/chat_token_usage_tracking);\n",
|
||||
"- Implement a [custom callback handler](/docs/how_to/custom_callbacks/) that uses appropriate tokenizers to count the tokens;\n",
|
||||
"- Use a monitoring platform such as [LangSmith](https://www.langchain.com/langsmith).\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"Note that when using legacy language models in a streaming context, token counts are not updated:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "31667d54",
|
||||
"metadata": {},
|
||||
"id": "cd61ed79-7858-49bb-afb5-d41291f597ba",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Tokens Used: 37\n",
|
||||
"\tPrompt Tokens: 4\n",
|
||||
"\tCompletion Tokens: 33\n",
|
||||
"Successful Requests: 1\n",
|
||||
"Total Cost (USD): $7.2e-05\n"
|
||||
"\n",
|
||||
"\n",
|
||||
"Why don't scientists trust atoms?\n",
|
||||
"\n",
|
||||
"Because they make up everything!\n",
|
||||
"\n",
|
||||
"Why don't scientists trust atoms?\n",
|
||||
"\n",
|
||||
"Because they make up everything.\n",
|
||||
"---\n",
|
||||
"\n",
|
||||
"Total Tokens: 0\n",
|
||||
"Prompt Tokens: 0\n",
|
||||
"Completion Tokens: 0\n",
|
||||
"Total Cost (USD): $0.0\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"with get_openai_callback() as cb:\n",
|
||||
" result = llm.invoke(\"Tell me a joke\")\n",
|
||||
" print(cb)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c0ab6d27",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Anything inside the context manager will get tracked. Here's an example of using it to track multiple calls in sequence."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "e09420f4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"72\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"with get_openai_callback() as cb:\n",
|
||||
" result = llm.invoke(\"Tell me a joke\")\n",
|
||||
" result2 = llm.invoke(\"Tell me a joke\")\n",
|
||||
" print(cb.total_tokens)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d8186e7b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If a chain or agent with multiple steps in it is used, it will track all those steps."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "5d1125c6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import AgentType, initialize_agent, load_tools\n",
|
||||
"from langchain_community.callbacks import get_openai_callback\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)\n",
|
||||
"agent = initialize_agent(\n",
|
||||
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "2f98c536",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m I need to find out who Olivia Wilde's boyfriend is and then calculate his age raised to the 0.23 power.\n",
|
||||
"Action: Search\n",
|
||||
"Action Input: \"Olivia Wilde boyfriend\"\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3m[\"Olivia Wilde and Harry Styles took fans by surprise with their whirlwind romance, which began when they met on the set of Don't Worry Darling.\", 'Olivia Wilde started dating Harry Styles after ending her years-long engagement to Jason Sudeikis — see their relationship timeline.', 'Olivia Wilde and Harry Styles were spotted early on in their relationship walking around London. (. Image ...', \"Looks like Olivia Wilde and Jason Sudeikis are starting 2023 on good terms. Amid their highly publicized custody battle – and the actress' ...\", 'The two started dating after Wilde split up with actor Jason Sudeikisin 2020. However, their relationship came to an end last November.', \"Olivia Wilde and Harry Styles started dating during the filming of Don't Worry Darling. While the movie got a lot of backlash because of the ...\", \"Here's what we know so far about Harry Styles and Olivia Wilde's relationship.\", 'Olivia and the Grammy winner kept their romance out of the spotlight as their relationship began just two months after her split from ex-fiancé ...', \"Harry Styles and Olivia Wilde first met on the set of Don't Worry Darling and stepped out as a couple in January 2021. Relive all their biggest relationship ...\"]\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3m Harry Styles is Olivia Wilde's boyfriend.\n",
|
||||
"Action: Search\n",
|
||||
"Action Input: \"Harry Styles age\"\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3m29 years\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3m I need to calculate 29 raised to the 0.23 power.\n",
|
||||
"Action: Calculator\n",
|
||||
"Action Input: 29^0.23\u001b[0m\n",
|
||||
"Observation: \u001b[33;1m\u001b[1;3mAnswer: 2.169459462491557\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer.\n",
|
||||
"Final Answer: Harry Styles is Olivia Wilde's boyfriend and his current age raised to the 0.23 power is 2.169459462491557.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"Total Tokens: 2205\n",
|
||||
"Prompt Tokens: 2053\n",
|
||||
"Completion Tokens: 152\n",
|
||||
"Total Cost (USD): $0.0441\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm = OpenAI(model_name=\"gpt-3.5-turbo-instruct\")\n",
|
||||
"\n",
|
||||
"with get_openai_callback() as cb:\n",
|
||||
" response = agent.run(\n",
|
||||
" \"Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?\"\n",
|
||||
" )\n",
|
||||
" print(f\"Total Tokens: {cb.total_tokens}\")\n",
|
||||
" print(f\"Prompt Tokens: {cb.prompt_tokens}\")\n",
|
||||
" print(f\"Completion Tokens: {cb.completion_tokens}\")\n",
|
||||
" print(f\"Total Cost (USD): ${cb.total_cost}\")"
|
||||
" for chunk in llm.stream(\"Tell me a joke\"):\n",
|
||||
" print(chunk, end=\"\", flush=True)\n",
|
||||
" print(result)\n",
|
||||
" print(\"---\")\n",
|
||||
"print()\n",
|
||||
"\n",
|
||||
"print(f\"Total Tokens: {cb.total_tokens}\")\n",
|
||||
"print(f\"Prompt Tokens: {cb.prompt_tokens}\")\n",
|
||||
"print(f\"Completion Tokens: {cb.completion_tokens}\")\n",
|
||||
"print(f\"Total Cost (USD): ${cb.total_cost}\")"
|
||||
]
|
||||
},
|
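As a rough illustration of the second workaround above (a custom callback handler backed by a tokenizer), something like the sketch below can approximate completion token counts while streaming. It is an approximation only and does not recover cost information:

```python
# Sketch: count streamed completion tokens for a legacy LLM with tiktoken.
import tiktoken
from langchain_core.callbacks import BaseCallbackHandler
from langchain_openai import OpenAI


class StreamingTokenCounter(BaseCallbackHandler):
    def __init__(self):
        # cl100k_base is the encoding used by gpt-3.5 models.
        self.encoding = tiktoken.get_encoding("cl100k_base")
        self.completion_tokens = 0

    def on_llm_new_token(self, token, **kwargs):
        # Each streamed chunk is re-tokenized, so this is approximate.
        self.completion_tokens += len(self.encoding.encode(token))


counter = StreamingTokenCounter()
llm = OpenAI(model_name="gpt-3.5-turbo-instruct", callbacks=[counter])

for chunk in llm.stream("Tell me a joke"):
    pass

print(counter.completion_tokens)
```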
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "80ca77a3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -183,7 +240,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -5,28 +5,38 @@
|
||||
"id": "fc0db1bc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to reorder retrieved results to put most relevant documents not in the middle\n",
|
||||
"# How to reorder retrieved results to mitigate the \"lost in the middle\" effect\n",
|
||||
"\n",
|
||||
"No matter the architecture of your model, there is a substantial performance degradation when you include 10+ retrieved documents.\n",
|
||||
"In brief: When models must access relevant information in the middle of long contexts, they tend to ignore the provided documents.\n",
|
||||
"See: https://arxiv.org/abs/2307.03172\n",
|
||||
"Substantial performance degradations in [RAG](/docs/tutorials/rag) applications have been [documented](https://arxiv.org/abs/2307.03172) as the number of retrieved documents grows (e.g., beyond ten). In brief: models are liable to miss relevant information in the middle of long contexts.\n",
|
||||
"\n",
|
||||
"To avoid this issue you can re-order documents after retrieval to avoid performance degradation."
|
||||
"By contrast, queries against vector stores will typically return documents in descending order of relevance (e.g., as measured by cosine similarity of [embeddings](/docs/concepts/#embedding-models)).\n",
|
||||
"\n",
|
||||
"To mitigate the [\"lost in the middle\"](https://arxiv.org/abs/2307.03172) effect, you can re-order documents after retrieval such that the most relevant documents are positioned at extrema (e.g., the first and last pieces of context), and the least relevant documents are positioned in the middle. In some cases this can help surface the most relevant information to LLMs.\n",
|
||||
"\n",
|
||||
"The [LongContextReorder](https://api.python.langchain.com/en/latest/document_transformers/langchain_community.document_transformers.long_context_reorder.LongContextReorder.html) document transformer implements this re-ordering procedure. Below we demonstrate an example."
|
||||
]
|
||||
},
|
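Before the full example, the interleaving idea itself can be sketched in a few lines (illustrative only; the notebook below uses the `LongContextReorder` transformer rather than this helper):

```python
# Sketch of "lost in the middle" re-ordering: given documents sorted by
# descending relevance, place the strongest at the two ends of the context
# and let the weakest drift toward the middle.
def reorder_ends_first(items):
    front, back = [], []
    for i, item in enumerate(items):  # items[0] is the most relevant
        (front if i % 2 == 0 else back).append(item)
    return front + back[::-1]


print(reorder_ends_first(["doc1", "doc2", "doc3", "doc4", "doc5"]))
# -> ['doc1', 'doc3', 'doc5', 'doc4', 'doc2']; doc1 and doc2 sit at the extrema.
```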
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "74d1ebe8",
|
||||
"id": "2074fdaa-edff-468a-970f-6f5f26e93d4a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet sentence-transformers langchain-chroma langchain langchain-openai langchain-huggingface > /dev/null"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c97eaaf2-34b7-4770-9949-e1abc4ca5226",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"First we embed some artificial documents and index them in an (in-memory) [Chroma](/docs/integrations/providers/chroma/) vector store. We will use [Hugging Face](/docs/integrations/text_embedding/huggingfacehub/) embeddings, but any LangChain vector store or embeddings model will suffice."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 2,
|
||||
"id": "49cbcd8e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -45,20 +55,14 @@
|
||||
" Document(page_content='This is just a random text.')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chains import LLMChain, StuffDocumentsChain\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.document_transformers import (\n",
|
||||
" LongContextReorder,\n",
|
||||
")\n",
|
||||
"from langchain_core.prompts import PromptTemplate\n",
|
||||
"from langchain_huggingface import HuggingFaceEmbeddings\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"# Get embeddings.\n",
|
||||
"embeddings = HuggingFaceEmbeddings(model_name=\"all-MiniLM-L6-v2\")\n",
|
||||
@@ -83,14 +87,22 @@
|
||||
"query = \"What can you tell me about the Celtics?\"\n",
|
||||
"\n",
|
||||
"# Get relevant documents ordered by relevance score\n",
|
||||
"docs = retriever.get_relevant_documents(query)\n",
|
||||
"docs = retriever.invoke(query)\n",
|
||||
"docs"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "175d031a-43fa-42f4-93c4-2ba52c3c3ee5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that documents are returned in descending order of relevance to the query. The `LongContextReorder` document transformer will implement the re-ordering described above:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "34fb9d6e",
|
||||
"execution_count": 3,
|
||||
"id": "9a1181f2-a3dc-4614-9233-2196ab65939e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -108,12 +120,14 @@
|
||||
" Document(page_content='This is a document about the Boston Celtics')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_transformers import LongContextReorder\n",
|
||||
"\n",
|
||||
"# Reorder the documents:\n",
|
||||
"# Less relevant document will be at the middle of the list and more\n",
|
||||
"# relevant elements at beginning / end.\n",
|
||||
@@ -125,58 +139,54 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "ceccab87",
|
||||
"cell_type": "markdown",
|
||||
"id": "a8d2ef0c-c397-4d8d-8118-3f7acf86d241",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\n\\nThe Celtics are referenced in four of the nine text extracts. They are mentioned as the favorite team of the author, the winner of a basketball game, a team with one of the best players, and a team with a specific player. Additionally, the last extract states that the document is about the Boston Celtics. This suggests that the Celtics are a basketball team, possibly from Boston, that is well-known and has had successful players and games in the past. '"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# We prepare and run a custom Stuff chain with reordered docs as context.\n",
|
||||
"\n",
|
||||
"# Override prompts\n",
|
||||
"document_prompt = PromptTemplate(\n",
|
||||
" input_variables=[\"page_content\"], template=\"{page_content}\"\n",
|
||||
")\n",
|
||||
"document_variable_name = \"context\"\n",
|
||||
"llm = OpenAI()\n",
|
||||
"stuff_prompt_override = \"\"\"Given this text extracts:\n",
|
||||
"-----\n",
|
||||
"{context}\n",
|
||||
"-----\n",
|
||||
"Please answer the following question:\n",
|
||||
"{query}\"\"\"\n",
|
||||
"prompt = PromptTemplate(\n",
|
||||
" template=stuff_prompt_override, input_variables=[\"context\", \"query\"]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Instantiate the chain\n",
|
||||
"llm_chain = LLMChain(llm=llm, prompt=prompt)\n",
|
||||
"chain = StuffDocumentsChain(\n",
|
||||
" llm_chain=llm_chain,\n",
|
||||
" document_prompt=document_prompt,\n",
|
||||
" document_variable_name=document_variable_name,\n",
|
||||
")\n",
|
||||
"chain.run(input_documents=reordered_docs, query=query)"
|
||||
"Below, we show how to incorporate the re-ordered documents into a simple question-answering chain:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d4696a97",
|
||||
"execution_count": 5,
|
||||
"id": "8bbea705-d5b9-4ed5-9957-e12547283622",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"The Celtics are a professional basketball team and one of the most iconic franchises in the NBA. They are highly regarded and have a large fan base. The team has had many successful seasons and is often considered one of the top teams in the league. They have a strong history and have produced many great players, such as Larry Bird and L. Kornet. The team is based in Boston and is often referred to as the Boston Celtics.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chains.combine_documents import create_stuff_documents_chain\n",
|
||||
"from langchain_core.prompts import PromptTemplate\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI()\n",
|
||||
"\n",
|
||||
"prompt_template = \"\"\"\n",
|
||||
"Given these texts:\n",
|
||||
"-----\n",
|
||||
"{context}\n",
|
||||
"-----\n",
|
||||
"Please answer the following question:\n",
|
||||
"{query}\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"prompt = PromptTemplate(\n",
|
||||
" template=prompt_template,\n",
|
||||
" input_variables=[\"context\", \"query\"],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Create and invoke the chain:\n",
|
||||
"chain = create_stuff_documents_chain(llm, prompt)\n",
|
||||
"response = chain.invoke({\"context\": reordered_docs, \"query\": query})\n",
|
||||
"print(response)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -195,7 +205,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
File diff suppressed because it is too large
@@ -5,33 +5,36 @@
|
||||
"id": "d9172545",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to use the MultiVector Retriever\n",
|
||||
"# How to retrieve using multiple vectors per document\n",
|
||||
"\n",
|
||||
"It can often be beneficial to store multiple vectors per document. There are multiple use cases where this is beneficial. LangChain has a base `MultiVectorRetriever` which makes querying this type of setup easy. A lot of the complexity lies in how to create the multiple vectors per document. This notebook covers some of the common ways to create those vectors and use the `MultiVectorRetriever`.\n",
|
||||
"It can often be useful to store multiple vectors per document. There are multiple use cases where this is beneficial. For example, we can embed multiple chunks of a document and associate those embeddings with the parent document, allowing retriever hits on the chunks to return the larger document.\n",
|
||||
"\n",
|
||||
"LangChain implements a base [MultiVectorRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.multi_vector.MultiVectorRetriever.html), which simplifies this process. Much of the complexity lies in how to create the multiple vectors per document. This notebook covers some of the common ways to create those vectors and use the `MultiVectorRetriever`.\n",
|
||||
"\n",
|
||||
"The methods to create multiple vectors per document include:\n",
|
||||
"\n",
|
||||
"- Smaller chunks: split a document into smaller chunks, and embed those (this is ParentDocumentRetriever).\n",
|
||||
"- Smaller chunks: split a document into smaller chunks, and embed those (this is [ParentDocumentRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.parent_document_retriever.ParentDocumentRetriever.html)).\n",
|
||||
"- Summary: create a summary for each document, embed that along with (or instead of) the document.\n",
|
||||
"- Hypothetical questions: create hypothetical questions that each document would be appropriate to answer, embed those along with (or instead of) the document.\n",
|
||||
"\n",
|
||||
"Note that this also enables another method of adding embeddings - manually. This is useful because you can explicitly add questions or queries that should lead to a document being recovered, giving you more control.\n",
|
||||
"\n",
|
||||
"Note that this also enables another method of adding embeddings - manually. This is great because you can explicitly add questions or queries that should lead to a document being recovered, giving you more control."
|
||||
"Below we walk through an example. First we instantiate some documents. We will index them in an (in-memory) [Chroma](/docs/integrations/providers/chroma/) vector store using [OpenAI](https://python.langchain.com/v0.2/docs/integrations/text_embedding/openai/) embeddings, but any LangChain vector store or embeddings model will suffice."
|
||||
]
|
||||
},
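A minimal sketch of the retriever skeleton the notebook builds (the collection name and `id_key` are illustrative, and an OpenAI API key is assumed to be set):

```python
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryByteStore
from langchain_chroma import Chroma
from langchain_openai import OpenAIEmbeddings

# The vector store indexes the derived ("child") embeddings.
vectorstore = Chroma(
    collection_name="multi_vector_sketch", embedding_function=OpenAIEmbeddings()
)

# The byte store holds the full "parent" documents.
store = InMemoryByteStore()

retriever = MultiVectorRetriever(
    vectorstore=vectorstore,
    byte_store=store,
    id_key="doc_id",  # metadata key linking child documents to their parents
)
```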
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "09cecd95-3499-465a-895a-944627ffb77f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain-chroma langchain langchain-openai > /dev/null"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "eed469be",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.retrievers.multi_vector import MultiVectorRetriever"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "18c1421a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -40,25 +43,22 @@
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.document_loaders import TextLoader\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "6d869496",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
|
||||
"\n",
|
||||
"loaders = [\n",
|
||||
" TextLoader(\"../../paul_graham_essay.txt\"),\n",
|
||||
" TextLoader(\"paul_graham_essay.txt\"),\n",
|
||||
" TextLoader(\"state_of_the_union.txt\"),\n",
|
||||
"]\n",
|
||||
"docs = []\n",
|
||||
"for loader in loaders:\n",
|
||||
" docs.extend(loader.load())\n",
|
||||
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000)\n",
|
||||
"docs = text_splitter.split_documents(docs)"
|
||||
"docs = text_splitter.split_documents(docs)\n",
|
||||
"\n",
|
||||
"# The vectorstore to use to index the child chunks\n",
|
||||
"vectorstore = Chroma(\n",
|
||||
" collection_name=\"full_documents\", embedding_function=OpenAIEmbeddings()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -68,52 +68,54 @@
|
||||
"source": [
|
||||
"## Smaller chunks\n",
|
||||
"\n",
|
||||
"Often times it can be useful to retrieve larger chunks of information, but embed smaller chunks. This allows for embeddings to capture the semantic meaning as closely as possible, but for as much context as possible to be passed downstream. Note that this is what the `ParentDocumentRetriever` does. Here we show what is going on under the hood."
|
||||
"Often times it can be useful to retrieve larger chunks of information, but embed smaller chunks. This allows for embeddings to capture the semantic meaning as closely as possible, but for as much context as possible to be passed downstream. Note that this is what the [ParentDocumentRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.parent_document_retriever.ParentDocumentRetriever.html) does. Here we show what is going on under the hood.\n",
|
||||
"\n",
|
||||
"We will make a distinction between the vector store, which indexes embeddings of the (sub) documents, and the document store, which houses the \"parent\" documents and associates them with an identifier."
|
||||
]
|
||||
},
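Sketched out, the smaller-chunks strategy looks roughly like this; it assumes the `docs`, `doc_ids`, `id_key`, and `retriever` variables from the surrounding cells:

```python
from langchain_text_splitters import RecursiveCharacterTextSplitter

# The splitter used to create the smaller ("child") chunks.
child_text_splitter = RecursiveCharacterTextSplitter(chunk_size=400)

sub_docs = []
for i, doc in enumerate(docs):
    _id = doc_ids[i]
    _sub_docs = child_text_splitter.split_documents([doc])
    # Tag each child chunk with its parent's identifier.
    for _doc in _sub_docs:
        _doc.metadata[id_key] = _id
    sub_docs.extend(_sub_docs)

# Child chunks are embedded in the vector store; parents live in the docstore.
retriever.vectorstore.add_documents(sub_docs)
retriever.docstore.mset(list(zip(doc_ids, docs)))
```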
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 2,
|
||||
"id": "0e7b6b45",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# The vectorstore to use to index the child chunks\n",
|
||||
"vectorstore = Chroma(\n",
|
||||
" collection_name=\"full_documents\", embedding_function=OpenAIEmbeddings()\n",
|
||||
")\n",
|
||||
"import uuid\n",
|
||||
"\n",
|
||||
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
|
||||
"\n",
|
||||
"# The storage layer for the parent documents\n",
|
||||
"store = InMemoryByteStore()\n",
|
||||
"id_key = \"doc_id\"\n",
|
||||
"\n",
|
||||
"# The retriever (empty to start)\n",
|
||||
"retriever = MultiVectorRetriever(\n",
|
||||
" vectorstore=vectorstore,\n",
|
||||
" byte_store=store,\n",
|
||||
" id_key=id_key,\n",
|
||||
")\n",
|
||||
"import uuid\n",
|
||||
"\n",
|
||||
"doc_ids = [str(uuid.uuid4()) for _ in docs]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "72a36491",
|
||||
"cell_type": "markdown",
|
||||
"id": "d4feded4-856a-4282-91c3-53aabc62e6ff",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# The splitter to use to create smaller chunks\n",
|
||||
"child_text_splitter = RecursiveCharacterTextSplitter(chunk_size=400)"
|
||||
"We next generate the \"sub\" documents by splitting the original documents. Note that we store the document identifier in the `metadata` of the corresponding [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) object."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 3,
|
||||
"id": "5d23247d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# The splitter to use to create smaller chunks\n",
|
||||
"child_text_splitter = RecursiveCharacterTextSplitter(chunk_size=400)\n",
|
||||
"\n",
|
||||
"sub_docs = []\n",
|
||||
"for i, doc in enumerate(docs):\n",
|
||||
" _id = doc_ids[i]\n",
|
||||
@@ -123,9 +125,17 @@
|
||||
" sub_docs.extend(_sub_docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8e0634f8-90d5-4250-981a-5257c8a6d455",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Finally, we index the documents in our vector store and document store:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 4,
|
||||
"id": "92ed5861",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -134,31 +144,46 @@
|
||||
"retriever.docstore.mset(list(zip(doc_ids, docs)))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "14c48c6d-850c-4317-9b6e-1ade92f2f710",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The vector store alone will retrieve small chunks:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 5,
|
||||
"id": "8afed60c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Document(page_content='Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court.', metadata={'doc_id': '2fd77862-9ed5-4fad-bf76-e487b747b333', 'source': 'state_of_the_union.txt'})"
|
||||
"Document(page_content='Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court.', metadata={'doc_id': '064eca46-a4c4-4789-8e3b-583f9597e54f', 'source': 'state_of_the_union.txt'})"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Vectorstore alone retrieves the small chunks\n",
|
||||
"retriever.vectorstore.similarity_search(\"justice breyer\")[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "717097c7-61d9-4306-8625-ef8f1940c127",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Whereas the retriever will return the larger parent document:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": 6,
|
||||
"id": "3c9017f1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -168,14 +193,13 @@
|
||||
"9875"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Retriever returns larger chunks\n",
|
||||
"len(retriever.get_relevant_documents(\"justice breyer\")[0].page_content)"
|
||||
"len(retriever.invoke(\"justice breyer\")[0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -183,12 +207,12 @@
|
||||
"id": "cdef8339-f9fa-4b3b-955f-ad9dbdf2734f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The default search type the retriever performs on the vector database is a similarity search. LangChain Vector Stores also support searching via [Max Marginal Relevance](https://api.python.langchain.com/en/latest/vectorstores/langchain_core.vectorstores.VectorStore.html#langchain_core.vectorstores.VectorStore.max_marginal_relevance_search) so if you want this instead you can just set the `search_type` property as follows:"
|
||||
"The default search type the retriever performs on the vector database is a similarity search. LangChain vector stores also support searching via [Max Marginal Relevance](https://api.python.langchain.com/en/latest/vectorstores/langchain_core.vectorstores.VectorStore.html#langchain_core.vectorstores.VectorStore.max_marginal_relevance_search). This can be controlled via the `search_type` parameter of the retriever:"
|
||||
]
|
||||
},
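The import needed for that switch is not visible in this hunk; a short sketch, assuming the `retriever` built above:

```python
from langchain.retrievers.multi_vector import SearchType

# Use Max Marginal Relevance instead of plain similarity search.
retriever.search_type = SearchType.mmr
retriever.invoke("justice breyer")
```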
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": 7,
|
||||
"id": "36739460-a737-4a8e-b70f-50bf8c8eaae7",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -198,7 +222,7 @@
|
||||
"9875"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -208,7 +232,7 @@
|
||||
"\n",
|
||||
"retriever.search_type = SearchType.mmr\n",
|
||||
"\n",
|
||||
"len(retriever.get_relevant_documents(\"justice breyer\")[0].page_content)"
|
||||
"len(retriever.invoke(\"justice breyer\")[0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -216,14 +240,37 @@
|
||||
"id": "d6a7ae0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Summary\n",
|
||||
"## Associating summaries with a document for retrieval\n",
|
||||
"\n",
|
||||
"Oftentimes a summary may be able to distill more accurately what a chunk is about, leading to better retrieval. Here we show how to create summaries, and then embed those."
|
||||
"A summary may be able to distill more accurately what a chunk is about, leading to better retrieval. Here we show how to create summaries, and then embed those.\n",
|
||||
"\n",
|
||||
"We construct a simple [chain](/docs/how_to/sequence) that will receive an input [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) object and generate a summary using a LLM.\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
|
||||
"\n",
|
||||
"<ChatModelTabs customVarName=\"llm\" />\n",
|
||||
"```"
|
||||
]
|
||||
},
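Pulling the fragments of that summarization chain together into one hedged sketch (an OpenAI API key is assumed; any chat model could stand in for `ChatOpenAI`):

```python
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

llm = ChatOpenAI()

chain = (
    {"doc": lambda x: x.page_content}
    | ChatPromptTemplate.from_template("Summarize the following document:\n\n{doc}")
    | llm
    | StrOutputParser()
)

# Summarize a single document, or batch over many with bounded concurrency.
summary = chain.invoke(Document(page_content="Some long document text ..."))
summaries = chain.batch(
    [Document(page_content="Another document ...")], {"max_concurrency": 5}
)
```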
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"execution_count": 8,
|
||||
"id": "6589291f-55bb-4e9a-b4ff-08f2506ed641",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "1433dff4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -233,27 +280,26 @@
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "35b30390",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"chain = (\n",
|
||||
" {\"doc\": lambda x: x.page_content}\n",
|
||||
" | ChatPromptTemplate.from_template(\"Summarize the following document:\\n\\n{doc}\")\n",
|
||||
" | ChatOpenAI(max_retries=0)\n",
|
||||
" | llm\n",
|
||||
" | StrOutputParser()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3faa9fde-1b09-4849-a815-8b2e89c30a02",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that we can [batch](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable) the chain accross documents:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"execution_count": 10,
|
||||
"id": "41a2a738",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -261,9 +307,17 @@
|
||||
"summaries = chain.batch(docs, {\"max_concurrency\": 5})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "73ef599e-140b-4905-8b62-6c52cdde1852",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can then initialize a `MultiVectorRetriever` as before, indexing the summaries in our vector store, and retaining the original documents in our document store:"
|
||||
]
|
||||
},
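Consolidated, the indexing of summaries keyed to their parent documents looks roughly like this (it assumes `docs`, `summaries`, `id_key`, and the `retriever` from the cells around this hunk):

```python
import uuid

from langchain_core.documents import Document

doc_ids = [str(uuid.uuid4()) for _ in docs]

# Each summary becomes a child document carrying its parent's identifier.
summary_docs = [
    Document(page_content=s, metadata={id_key: doc_ids[i]})
    for i, s in enumerate(summaries)
]

retriever.vectorstore.add_documents(summary_docs)
retriever.docstore.mset(list(zip(doc_ids, docs)))
```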
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"execution_count": 11,
|
||||
"id": "7ac5e4b1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -279,29 +333,13 @@
|
||||
" byte_store=store,\n",
|
||||
" id_key=id_key,\n",
|
||||
")\n",
|
||||
"doc_ids = [str(uuid.uuid4()) for _ in docs]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "0d93309f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"doc_ids = [str(uuid.uuid4()) for _ in docs]\n",
|
||||
"\n",
|
||||
"summary_docs = [\n",
|
||||
" Document(page_content=s, metadata={id_key: doc_ids[i]})\n",
|
||||
" for i, s in enumerate(summaries)\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "6d5edf0d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"]\n",
|
||||
"\n",
|
||||
"retriever.vectorstore.add_documents(summary_docs)\n",
|
||||
"retriever.docstore.mset(list(zip(doc_ids, docs)))"
|
||||
]
|
||||
@@ -320,50 +358,48 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "299232d6",
|
||||
"cell_type": "markdown",
|
||||
"id": "f0274892-29c1-4616-9040-d23f9d537526",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"sub_docs = vectorstore.similarity_search(\"justice breyer\")"
|
||||
"Querying the vector store will return summaries:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "10e404c0",
|
||||
"execution_count": 12,
|
||||
"id": "299232d6",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Document(page_content=\"The document is a speech given by President Biden addressing various issues and outlining his agenda for the nation. He highlights the importance of nominating a Supreme Court justice and introduces his nominee, Judge Ketanji Brown Jackson. He emphasizes the need to secure the border and reform the immigration system, including providing a pathway to citizenship for Dreamers and essential workers. The President also discusses the protection of women's rights, including access to healthcare and the right to choose. He calls for the passage of the Equality Act to protect LGBTQ+ rights. Additionally, President Biden discusses the need to address the opioid epidemic, improve mental health services, support veterans, and fight against cancer. He expresses optimism for the future of America and the strength of the American people.\", metadata={'doc_id': '56345bff-3ead-418c-a4ff-dff203f77474'})"
|
||||
"Document(page_content=\"President Biden recently nominated Judge Ketanji Brown Jackson to serve on the United States Supreme Court, emphasizing her qualifications and broad support. The President also outlined a plan to secure the border, fix the immigration system, protect women's rights, support LGBTQ+ Americans, and advance mental health services. He highlighted the importance of bipartisan unity in passing legislation, such as the Violence Against Women Act. The President also addressed supporting veterans, particularly those impacted by exposure to burn pits, and announced plans to expand benefits for veterans with respiratory cancers. Additionally, he proposed a plan to end cancer as we know it through the Cancer Moonshot initiative. President Biden expressed optimism about the future of America and emphasized the strength of the American people in overcoming challenges.\", metadata={'doc_id': '84015b1b-980e-400a-94d8-cf95d7e079bd'})"
|
||||
]
|
||||
},
|
||||
"execution_count": 19,
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"sub_docs = retriever.vectorstore.similarity_search(\"justice breyer\")\n",
|
||||
"\n",
|
||||
"sub_docs[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"id": "e4cce5c2",
|
||||
"cell_type": "markdown",
|
||||
"id": "e4f77ac5-2926-4f60-aad5-b2067900dff9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retrieved_docs = retriever.get_relevant_documents(\"justice breyer\")"
|
||||
"Whereas the retriever will return the larger source document:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"id": "c8570dbb",
|
||||
"execution_count": 13,
|
||||
"id": "e4cce5c2",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -372,12 +408,14 @@
|
||||
"9194"
|
||||
]
|
||||
},
|
||||
"execution_count": 21,
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"retrieved_docs = retriever.invoke(\"justice breyer\")\n",
|
||||
"\n",
|
||||
"len(retrieved_docs[0].page_content)"
|
||||
]
|
||||
},
|
||||
@@ -388,42 +426,28 @@
|
||||
"source": [
|
||||
"## Hypothetical Queries\n",
|
||||
"\n",
|
||||
"An LLM can also be used to generate a list of hypothetical questions that could be asked of a particular document. These questions can then be embedded"
|
||||
"An LLM can also be used to generate a list of hypothetical questions that could be asked of a particular document, which might bear close semantic similarity to relevant queries in a [RAG](/docs/tutorials/rag) application. These questions can then be embedded and associated with the documents to improve retrieval.\n",
|
||||
"\n",
|
||||
"Below, we use the [with_structured_output](/docs/how_to/structured_output/) method to structure the LLM output into a list of strings."
|
||||
]
|
||||
},
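The structured-output chain is split across old and new lines in the hunk below; here is a consolidated sketch of the new version (the model name and prompt follow the diff, and an OpenAI API key is assumed):

```python
from typing import List

from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI


class HypotheticalQuestions(BaseModel):
    """Generate hypothetical questions."""

    questions: List[str] = Field(..., description="List of questions")


chain = (
    {"doc": lambda x: x.page_content}
    | ChatPromptTemplate.from_template(
        "Generate a list of exactly 3 hypothetical questions that the below "
        "document could be used to answer:\n\n{doc}"
    )
    | ChatOpenAI(max_retries=0, model="gpt-4o").with_structured_output(
        HypotheticalQuestions
    )
    | (lambda x: x.questions)  # unpack the structured output into a plain list
)
```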
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"id": "5219b085",
|
||||
"execution_count": 16,
|
||||
"id": "03d85234-c33a-4a43-861d-47328e1ec2ea",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"functions = [\n",
|
||||
" {\n",
|
||||
" \"name\": \"hypothetical_questions\",\n",
|
||||
" \"description\": \"Generate hypothetical questions\",\n",
|
||||
" \"parameters\": {\n",
|
||||
" \"type\": \"object\",\n",
|
||||
" \"properties\": {\n",
|
||||
" \"questions\": {\n",
|
||||
" \"type\": \"array\",\n",
|
||||
" \"items\": {\"type\": \"string\"},\n",
|
||||
" },\n",
|
||||
" },\n",
|
||||
" \"required\": [\"questions\"],\n",
|
||||
" },\n",
|
||||
" }\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"id": "523deb92",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.output_parsers.openai_functions import JsonKeyOutputFunctionsParser\n",
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class HypotheticalQuestions(BaseModel):\n",
|
||||
" \"\"\"Generate hypothetical questions.\"\"\"\n",
|
||||
"\n",
|
||||
" questions: List[str] = Field(..., description=\"List of questions\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"chain = (\n",
|
||||
" {\"doc\": lambda x: x.page_content}\n",
|
||||
@@ -431,28 +455,36 @@
|
||||
" | ChatPromptTemplate.from_template(\n",
|
||||
" \"Generate a list of exactly 3 hypothetical questions that the below document could be used to answer:\\n\\n{doc}\"\n",
|
||||
" )\n",
|
||||
" | ChatOpenAI(max_retries=0, model=\"gpt-4\").bind(\n",
|
||||
" functions=functions, function_call={\"name\": \"hypothetical_questions\"}\n",
|
||||
" | ChatOpenAI(max_retries=0, model=\"gpt-4o\").with_structured_output(\n",
|
||||
" HypotheticalQuestions\n",
|
||||
" )\n",
|
||||
" | JsonKeyOutputFunctionsParser(key_name=\"questions\")\n",
|
||||
" | (lambda x: x.questions)\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6dddc40f-62af-413c-b944-f94a5e1f2f4e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Invoking the chain on a single document demonstrates that it outputs a list of questions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"execution_count": 17,
|
||||
"id": "11d30554",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[\"What was the author's first experience with programming like?\",\n",
|
||||
" 'Why did the author switch their focus from AI to Lisp during their graduate studies?',\n",
|
||||
" 'What led the author to contemplate a career in art instead of computer science?']"
|
||||
"[\"What impact did the IBM 1401 have on the author's early programming experiences?\",\n",
|
||||
" \"How did the transition from using the IBM 1401 to microcomputers influence the author's programming journey?\",\n",
|
||||
" \"What role did Lisp play in shaping the author's understanding and approach to AI?\"]"
|
||||
]
|
||||
},
|
||||
"execution_count": 24,
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -462,22 +494,24 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 25,
|
||||
"id": "3eb2e48c",
|
||||
"cell_type": "markdown",
|
||||
"id": "dcffc572-7b20-4b77-857a-90ec360a8f7e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"hypothetical_questions = chain.batch(docs, {\"max_concurrency\": 5})"
|
||||
"We can batch then batch the chain over all documents and assemble our vector store and document store as before:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"execution_count": 18,
|
||||
"id": "b2cd6e75",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Batch chain over documents to generate hypothetical questions\n",
|
||||
"hypothetical_questions = chain.batch(docs, {\"max_concurrency\": 5})\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# The vectorstore to use to index the child chunks\n",
|
||||
"vectorstore = Chroma(\n",
|
||||
" collection_name=\"hypo-questions\", embedding_function=OpenAIEmbeddings()\n",
|
||||
@@ -491,82 +525,67 @@
|
||||
" byte_store=store,\n",
|
||||
" id_key=id_key,\n",
|
||||
")\n",
|
||||
"doc_ids = [str(uuid.uuid4()) for _ in docs]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"id": "18831b3b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"doc_ids = [str(uuid.uuid4()) for _ in docs]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Generate Document objects from hypothetical questions\n",
|
||||
"question_docs = []\n",
|
||||
"for i, question_list in enumerate(hypothetical_questions):\n",
|
||||
" question_docs.extend(\n",
|
||||
" [Document(page_content=s, metadata={id_key: doc_ids[i]}) for s in question_list]\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 28,
|
||||
"id": "224b24c5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
" )\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"retriever.vectorstore.add_documents(question_docs)\n",
|
||||
"retriever.docstore.mset(list(zip(doc_ids, docs)))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 29,
|
||||
"id": "7b442b90",
|
||||
"cell_type": "markdown",
|
||||
"id": "75cba8ab-a06f-4545-85fc-cf49d0204b5e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"sub_docs = vectorstore.similarity_search(\"justice breyer\")"
|
||||
"Note that querying the underlying vector store will retrieve hypothetical questions that are semantically similar to the input query:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 30,
|
||||
"id": "089b5ad0",
|
||||
"execution_count": 19,
|
||||
"id": "7b442b90",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='Who has been nominated to serve on the United States Supreme Court?', metadata={'doc_id': '0b3a349e-c936-4e77-9c40-0a39fc3e07f0'}),\n",
|
||||
" Document(page_content=\"What was the context and content of Robert Morris' advice to the document's author in 2010?\", metadata={'doc_id': 'b2b2cdca-988a-4af1-ba47-46170770bc8c'}),\n",
|
||||
" Document(page_content='How did personal circumstances influence the decision to pass on the leadership of Y Combinator?', metadata={'doc_id': 'b2b2cdca-988a-4af1-ba47-46170770bc8c'}),\n",
|
||||
" Document(page_content='What were the reasons for the author leaving Yahoo in the summer of 1999?', metadata={'doc_id': 'ce4f4981-ca60-4f56-86f0-89466de62325'})]"
|
||||
"[Document(page_content='What might be the potential benefits of nominating Circuit Court of Appeals Judge Ketanji Brown Jackson to the United States Supreme Court?', metadata={'doc_id': '43292b74-d1b8-4200-8a8b-ea0cb57fbcdb'}),\n",
|
||||
" Document(page_content='How might the Bipartisan Infrastructure Law impact the economic competition between the U.S. and China?', metadata={'doc_id': '66174780-d00c-4166-9791-f0069846e734'}),\n",
|
||||
" Document(page_content='What factors led to the creation of Y Combinator?', metadata={'doc_id': '72003c4e-4cc9-4f09-a787-0b541a65b38c'}),\n",
|
||||
" Document(page_content='How did the ability to publish essays online change the landscape for writers and thinkers?', metadata={'doc_id': 'e8d2c648-f245-4bcc-b8d3-14e64a164b64'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 30,
|
||||
"execution_count": 19,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"sub_docs = retriever.vectorstore.similarity_search(\"justice breyer\")\n",
|
||||
"\n",
|
||||
"sub_docs"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 31,
|
||||
"id": "7594b24e",
|
||||
"cell_type": "markdown",
|
||||
"id": "63c32e43-5f4a-463b-a0c2-2101986f70e6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retrieved_docs = retriever.get_relevant_documents(\"justice breyer\")"
|
||||
"And invoking the retriever will return the corresponding document:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 32,
|
||||
"id": "4c120c65",
|
||||
"execution_count": 20,
|
||||
"id": "7594b24e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -575,22 +594,15 @@
|
||||
"9194"
|
||||
]
|
||||
},
|
||||
"execution_count": 32,
|
||||
"execution_count": 20,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"retrieved_docs = retriever.invoke(\"justice breyer\")\n",
|
||||
"len(retrieved_docs[0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "005072b8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -609,7 +621,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -36,12 +36,13 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 1,
|
||||
"id": "ede7fdc0-ef31-483d-bd67-32e4b5c5d527",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain langchain-community langchainhub langchain-chroma bs4"
|
||||
"%%capture --no-stderr\n",
|
||||
"%pip install --upgrade --quiet langchain langchain-community langchain-chroma bs4"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -54,7 +55,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 2,
|
||||
"id": "143787ca-d8e6-4dc9-8281-4374f4d71720",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -62,7 +63,8 @@
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
|
||||
"if not os.environ.get(\"OPENAI_API_KEY\"):\n",
|
||||
" os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
|
||||
"\n",
|
||||
"# import dotenv\n",
|
||||
"\n",
|
||||
@@ -83,13 +85,14 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 3,
|
||||
"id": "07411adb-3722-4f65-ab7f-8f6f57663d11",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
|
||||
"os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()"
|
||||
"if not os.environ.get(\"LANGCHAIN_API_KEY\"):\n",
|
||||
" os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -126,7 +129,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 4,
|
||||
"id": "cb58f273-2111-4a9b-8932-9b64c95030c8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -157,13 +160,12 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 5,
|
||||
"id": "820244ae-74b4-4593-b392-822979dd91b8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import bs4\n",
|
||||
"from langchain import hub\n",
|
||||
"from langchain.chains import create_retrieval_chain\n",
|
||||
"from langchain.chains.combine_documents import create_stuff_documents_chain\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
@@ -202,7 +204,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 6,
|
||||
"id": "2b685428-8b82-4af1-be4f-7232c5d55b73",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -239,7 +241,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 7,
|
||||
"id": "4c4b1695-6217-4ee8-abaf-7cc26366d988",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -265,7 +267,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 8,
|
||||
"id": "afef4385-f571-4874-8f52-3d475642f579",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -314,7 +316,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 9,
|
||||
"id": "9c3fb176-8d6a-4dc7-8408-6a22c5f7cc72",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -343,17 +345,17 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 10,
|
||||
"id": "1046c92f-21b3-4214-907d-92878d8cba23",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Task decomposition involves breaking down a complex task into smaller and simpler steps to make it more manageable and easier to accomplish. This process can be done using techniques like Chain of Thought (CoT) or Tree of Thoughts to guide the model in thinking step by step or exploring multiple reasoning possibilities at each step. Task decomposition can be facilitated by providing simple prompts to a language model, task-specific instructions, or human inputs.'"
|
||||
"'Task decomposition involves breaking down a complex task into smaller and simpler steps to make it more manageable and easier to accomplish. This process can be done using techniques like Chain of Thought (CoT) or Tree of Thoughts to guide the model in breaking down tasks effectively. Task decomposition can be facilitated by providing simple prompts to a language model, task-specific instructions, or human inputs.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -369,17 +371,17 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 11,
|
||||
"id": "0e89c75f-7ad7-4331-a2fe-57579eb8f840",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Task decomposition can be achieved through various methods, including using techniques like Chain of Thought (CoT) or Tree of Thoughts to guide the model in breaking down complex tasks into smaller steps. Common ways of task decomposition include providing simple prompts to a language model, task-specific instructions tailored to the specific task at hand, or incorporating human inputs to guide the decomposition process effectively.'"
|
||||
"'Task decomposition can be achieved through various methods, including using techniques like Chain of Thought (CoT) or Tree of Thoughts to guide the model in breaking down tasks effectively. Common ways of task decomposition include providing simple prompts to a language model, task-specific instructions, or human inputs to break down complex tasks into smaller and more manageable steps. Additionally, task decomposition can involve utilizing resources like internet access for information gathering, long-term memory management, and GPT-3.5 powered agents for delegation of simple tasks.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -401,7 +403,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": 12,
|
||||
"id": "7686b874-3a85-499f-82b5-28a85c4c768c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -411,11 +413,11 @@
|
||||
"text": [
|
||||
"User: What is Task Decomposition?\n",
|
||||
"\n",
|
||||
"AI: Task decomposition involves breaking down a complex task into smaller and simpler steps to make it more manageable and easier to accomplish. This process can be done using techniques like Chain of Thought (CoT) or Tree of Thoughts to guide the model in thinking step by step or exploring multiple reasoning possibilities at each step. Task decomposition can be facilitated by providing simple prompts to a language model, task-specific instructions, or human inputs.\n",
|
||||
"AI: Task decomposition involves breaking down a complex task into smaller and simpler steps to make it more manageable and easier to accomplish. This process can be done using techniques like Chain of Thought (CoT) or Tree of Thoughts to guide the model in breaking down tasks effectively. Task decomposition can be facilitated by providing simple prompts to a language model, task-specific instructions, or human inputs.\n",
|
||||
"\n",
|
||||
"User: What are common ways of doing it?\n",
|
||||
"\n",
|
||||
"AI: Task decomposition can be achieved through various methods, including using techniques like Chain of Thought (CoT) or Tree of Thoughts to guide the model in breaking down complex tasks into smaller steps. Common ways of task decomposition include providing simple prompts to a language model, task-specific instructions tailored to the specific task at hand, or incorporating human inputs to guide the decomposition process effectively.\n",
|
||||
"AI: Task decomposition can be achieved through various methods, including using techniques like Chain of Thought (CoT) or Tree of Thoughts to guide the model in breaking down tasks effectively. Common ways of task decomposition include providing simple prompts to a language model, task-specific instructions, or human inputs to break down complex tasks into smaller and more manageable steps. Additionally, task decomposition can involve utilizing resources like internet access for information gathering, long-term memory management, and GPT-3.5 powered agents for delegation of simple tasks.\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
@@ -452,7 +454,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 13,
|
||||
"id": "71c32048-1a41-465f-a9e2-c4affc332fd9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -552,17 +554,17 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 14,
|
||||
"id": "6d0a7a73-d151-47d9-9e99-b4f3291c0322",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Task decomposition involves breaking down a complex task into smaller and simpler steps to make it more manageable. This process helps agents or models tackle difficult tasks by dividing them into more easily achievable subgoals. Task decomposition can be done through techniques like Chain of Thought or Tree of Thoughts, which guide the model in thinking step by step or exploring multiple reasoning possibilities at each step.'"
|
||||
"'Task decomposition involves breaking down a complex task into smaller and simpler steps to make it more manageable. Techniques like Chain of Thought (CoT) and Tree of Thoughts help in decomposing hard tasks into multiple manageable tasks by instructing models to think step by step and explore multiple reasoning possibilities at each step. Task decomposition can be achieved through various methods such as using prompting techniques, task-specific instructions, or human inputs.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -578,17 +580,17 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 15,
|
||||
"id": "17021822-896a-4513-a17d-1d20b1c5381c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"Common ways of task decomposition include using techniques like Chain of Thought (CoT) or Tree of Thoughts to guide models in breaking down complex tasks into smaller steps. This can be achieved through simple prompting with LLMs, task-specific instructions, or human inputs to help the model understand and navigate the task effectively. Task decomposition aims to enhance model performance on complex tasks by utilizing more test-time computation and shedding light on the model's thinking process.\""
|
||||
"'Task decomposition can be done in common ways such as using prompting techniques like Chain of Thought (CoT) or Tree of Thoughts, which instruct models to think step by step and explore multiple reasoning possibilities at each step. Another way is to provide task-specific instructions, such as asking to \"Write a story outline\" for writing a novel, to guide the decomposition process. Additionally, task decomposition can also involve human inputs to break down complex tasks into smaller and simpler steps.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -618,7 +620,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 16,
|
||||
"id": "809cc747-2135-40a2-8e73-e4556343ee64",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -646,14 +648,14 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 17,
|
||||
"id": "1726d151-4653-4c72-a187-a14840add526",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langgraph.prebuilt import chat_agent_executor\n",
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"agent_executor = chat_agent_executor.create_tool_calling_executor(llm, tools)"
|
||||
"agent_executor = create_react_agent(llm, tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -666,19 +668,26 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 18,
|
||||
"id": "52ae46d9-43f7-481b-96d5-df750be3ad65",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Error in LangChainTracer.on_tool_end callback: TracerException(\"Found chain run at ID 5cd28d13-88dd-4eac-a465-3770ac27eff6, but expected {'tool'} run.\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_wxRrUmNbaNny8wh9JIb5uCRB', 'function': {'arguments': '{\"query\":\"Task Decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 68, 'total_tokens': 87}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_3b956da36b', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-57ee0d12-6142-4957-a002-cce0093efe07-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Task Decomposition'}, 'id': 'call_wxRrUmNbaNny8wh9JIb5uCRB'}])]}}\n",
|
||||
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_TbhPPPN05GKi36HLeaN4QM90', 'function': {'arguments': '{\"query\":\"Task Decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 68, 'total_tokens': 87}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-2e60d910-879a-4a2a-b1e9-6a6c5c7d7ebc-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Task Decomposition'}, 'id': 'call_TbhPPPN05GKi36HLeaN4QM90'}])]}}\n",
|
||||
"----\n",
|
||||
"{'action': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\n(3) Task execution: Expert models execute on the specific tasks and log results.\\nInstruction:\\n\\nWith the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user\\'s request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.\\n\\nFig. 11. Illustration of how HuggingGPT works. (Image source: Shen et al. 2023)\\nThe system comprises of 4 stages:\\n(1) Task planning: LLM works as the brain and parses the user requests into multiple tasks. There are four attributes associated with each task: task type, ID, dependencies, and arguments. They use few-shot examples to guide LLM to do task parsing and planning.\\nInstruction:', name='blog_post_retriever', id='9c3a17f7-653c-47fa-b4e4-fa3d8d24c85d', tool_call_id='call_wxRrUmNbaNny8wh9JIb5uCRB')]}}\n",
|
||||
"{'tools': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.', name='blog_post_retriever', tool_call_id='call_TbhPPPN05GKi36HLeaN4QM90')]}}\n",
|
||||
"----\n",
|
||||
"{'agent': {'messages': [AIMessage(content='Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. This approach helps agents in planning and executing tasks more effectively. One common method for task decomposition is the Chain of Thought (CoT) technique, where models are instructed to think step by step to decompose hard tasks into manageable steps. Another extension of CoT is the Tree of Thoughts, which explores multiple reasoning possibilities at each step by creating a tree structure of thought steps.\\n\\nTask decomposition can be achieved through various methods, such as using language models with simple prompting, task-specific instructions, or human inputs. By breaking down tasks into smaller components, agents can better plan and execute tasks efficiently.\\n\\nIf you would like more detailed information or examples on task decomposition, feel free to ask!', response_metadata={'token_usage': {'completion_tokens': 154, 'prompt_tokens': 588, 'total_tokens': 742}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_3b956da36b', 'finish_reason': 'stop', 'logprobs': None}, id='run-8991fa20-c527-4f9e-a058-fc6264fe6259-0')]}}\n",
|
||||
"{'agent': {'messages': [AIMessage(content='Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. This approach helps in transforming big tasks into multiple manageable tasks, making it easier for autonomous agents to handle and interpret the thinking process. One common method for task decomposition is the Chain of Thought (CoT) technique, where models are instructed to \"think step by step\" to decompose hard tasks. Another extension of CoT is the Tree of Thoughts, which explores multiple reasoning possibilities at each step by creating a tree structure of multiple thoughts per step. Task decomposition can be facilitated through various methods such as using simple prompts, task-specific instructions, or human inputs.', response_metadata={'token_usage': {'completion_tokens': 130, 'prompt_tokens': 636, 'total_tokens': 766}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-3ef17638-65df-4030-a7fe-795e6da91c69-0')]}}\n",
|
||||
"----\n"
|
||||
]
|
||||
}
|
||||
@@ -707,7 +716,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 19,
|
||||
"id": "837a401e-9757-4d0e-a0da-24fa097d887e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -716,9 +725,7 @@
|
||||
"\n",
|
||||
"memory = SqliteSaver.from_conn_string(\":memory:\")\n",
|
||||
"\n",
|
||||
"agent_executor = chat_agent_executor.create_tool_calling_executor(\n",
|
||||
" llm, tools, checkpointer=memory\n",
|
||||
")"
|
||||
"agent_executor = create_react_agent(llm, tools, checkpointer=memory)"
|
||||
]
|
||||
},
|
||||
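Not shown in this hunk: the `SqliteSaver` checkpointer above keys conversation memory by a `thread_id` supplied via the run config. A minimal usage sketch, assuming the `agent_executor` from the cell above (the thread id value is just an illustrative placeholder):

```python
# Minimal sketch (assumes `agent_executor` from the cell above): the checkpointer
# keys conversation state by thread_id, passed in through the config.
from langchain_core.messages import HumanMessage

config = {"configurable": {"thread_id": "abc123"}}  # illustrative thread id

for step in agent_executor.stream(
    {"messages": [HumanMessage(content="What is Task Decomposition?")]},
    config=config,
):
    print(step)
    print("----")
```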
{
|
||||
@@ -733,7 +740,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"execution_count": 20,
|
||||
"id": "d6d70833-b958-4cd7-9e27-29c1c08bb1b8",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -741,7 +748,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'agent': {'messages': [AIMessage(content='Hello Bob! How can I assist you today?', response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 67, 'total_tokens': 78}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_3b956da36b', 'finish_reason': 'stop', 'logprobs': None}, id='run-1451e59b-b135-4776-985d-4759338ffee5-0')]}}\n",
|
||||
"{'agent': {'messages': [AIMessage(content='Hello Bob! How can I assist you today?', response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 67, 'total_tokens': 78}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-1cd17562-18aa-4839-b41b-403b17a0fc20-0')]}}\n",
|
||||
"----\n"
|
||||
]
|
||||
}
|
||||
@@ -766,19 +773,26 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"execution_count": 21,
|
||||
"id": "e2c570ae-dd91-402c-8693-ae746de63b16",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Error in LangChainTracer.on_tool_end callback: TracerException(\"Found chain run at ID c54381c0-c5d9-495a-91a0-aca4ae755663, but expected {'tool'} run.\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_ab2x4iUPSWDAHS5txL7PspSK', 'function': {'arguments': '{\"query\":\"Task Decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 91, 'total_tokens': 110}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_3b956da36b', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-f76b5813-b41c-4d0d-9ed2-667b988d885e-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Task Decomposition'}, 'id': 'call_ab2x4iUPSWDAHS5txL7PspSK'}])]}}\n",
|
||||
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_rg7zKTE5e0ICxVSslJ1u9LMg', 'function': {'arguments': '{\"query\":\"Task Decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 91, 'total_tokens': 110}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-122bf097-7ff1-49aa-b430-e362b51354ad-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Task Decomposition'}, 'id': 'call_rg7zKTE5e0ICxVSslJ1u9LMg'}])]}}\n",
|
||||
"----\n",
|
||||
"{'action': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\n(3) Task execution: Expert models execute on the specific tasks and log results.\\nInstruction:\\n\\nWith the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user\\'s request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.\\n\\nFig. 11. Illustration of how HuggingGPT works. (Image source: Shen et al. 2023)\\nThe system comprises of 4 stages:\\n(1) Task planning: LLM works as the brain and parses the user requests into multiple tasks. There are four attributes associated with each task: task type, ID, dependencies, and arguments. They use few-shot examples to guide LLM to do task parsing and planning.\\nInstruction:', name='blog_post_retriever', id='e0895fa5-5d41-4be0-98db-10a83d42fc2f', tool_call_id='call_ab2x4iUPSWDAHS5txL7PspSK')]}}\n",
|
||||
"{'tools': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.', name='blog_post_retriever', tool_call_id='call_rg7zKTE5e0ICxVSslJ1u9LMg')]}}\n",
|
||||
"----\n",
|
||||
"{'agent': {'messages': [AIMessage(content='Task decomposition is a technique used in complex tasks where the task is broken down into smaller and simpler steps. This approach helps in managing and solving difficult tasks by dividing them into more manageable components. One common method for task decomposition is the Chain of Thought (CoT) technique, which prompts the model to think step by step and decompose hard tasks into smaller steps. Another extension of CoT is the Tree of Thoughts, which explores multiple reasoning possibilities at each step by creating a tree structure of thought steps.\\n\\nTask decomposition can be achieved through various methods, such as using language models with simple prompting, task-specific instructions, or human inputs. By breaking down tasks into smaller components, agents can better plan and execute complex tasks effectively.\\n\\nIf you would like more detailed information or examples related to task decomposition, feel free to ask!', response_metadata={'token_usage': {'completion_tokens': 165, 'prompt_tokens': 611, 'total_tokens': 776}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_3b956da36b', 'finish_reason': 'stop', 'logprobs': None}, id='run-13296566-8577-4d65-982b-a39718988ca3-0')]}}\n",
|
||||
"{'agent': {'messages': [AIMessage(content='Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. This approach helps in managing and solving intricate problems by dividing them into more manageable components. By decomposing tasks, agents or models can better understand the steps involved and plan their actions accordingly. Techniques like Chain of Thought (CoT) and Tree of Thoughts are examples of methods that enhance model performance on complex tasks by breaking them down into smaller steps.', response_metadata={'token_usage': {'completion_tokens': 87, 'prompt_tokens': 659, 'total_tokens': 746}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-b9166386-83e5-4b82-9a4b-590e5fa76671-0')]}}\n",
|
||||
"----\n"
|
||||
]
|
||||
}
|
||||
@@ -805,7 +819,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 25,
|
||||
"execution_count": 22,
|
||||
"id": "570d8c68-136e-4ba5-969a-03ba195f6118",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -813,11 +827,24 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_KvoiamnLfGEzMeEMlV3u0TJ7', 'function': {'arguments': '{\"query\":\"common ways of task decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 21, 'prompt_tokens': 930, 'total_tokens': 951}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_3b956da36b', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-dd842071-6dbd-4b68-8657-892eaca58638-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'common ways of task decomposition'}, 'id': 'call_KvoiamnLfGEzMeEMlV3u0TJ7'}])]}}\n",
|
||||
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_6kbxTU5CDWLmF9mrvR7bWSkI', 'function': {'arguments': '{\"query\":\"Common ways of task decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 21, 'prompt_tokens': 769, 'total_tokens': 790}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-2d2c8327-35cd-484a-b8fd-52436657c2d8-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Common ways of task decomposition'}, 'id': 'call_6kbxTU5CDWLmF9mrvR7bWSkI'}])]}}\n",
|
||||
"----\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Error in LangChainTracer.on_tool_end callback: TracerException(\"Found chain run at ID 29553415-e0f4-41a9-8921-ba489e377f68, but expected {'tool'} run.\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'tools': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.', name='blog_post_retriever', tool_call_id='call_6kbxTU5CDWLmF9mrvR7bWSkI')]}}\n",
|
||||
"----\n",
|
||||
"{'action': {'messages': [ToolMessage(content='Tree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nResources:\\n1. Internet access for searches and information gathering.\\n2. Long Term memory management.\\n3. GPT-3.5 powered Agents for delegation of simple tasks.\\n4. File output.\\n\\nPerformance Evaluation:\\n1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\\n2. Constructively self-criticize your big-picture behavior constantly.\\n3. Reflect on past decisions and strategies to refine your approach.\\n4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.\\n\\n(3) Task execution: Expert models execute on the specific tasks and log results.\\nInstruction:\\n\\nWith the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user\\'s request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.', name='blog_post_retriever', id='c749bb8e-c8e0-4fa3-bc11-3e2e0651880b', tool_call_id='call_KvoiamnLfGEzMeEMlV3u0TJ7')]}}\n",
|
||||
"----\n",
|
||||
"{'agent': {'messages': [AIMessage(content='According to the blog post, common ways of task decomposition include:\\n\\n1. Using language models with simple prompting like \"Steps for XYZ\" or \"What are the subgoals for achieving XYZ?\"\\n2. Utilizing task-specific instructions, for example, using \"Write a story outline\" for writing a novel.\\n3. Involving human inputs in the task decomposition process.\\n\\nThese methods help in breaking down complex tasks into smaller and more manageable steps, facilitating better planning and execution of the overall task.', response_metadata={'token_usage': {'completion_tokens': 100, 'prompt_tokens': 1475, 'total_tokens': 1575}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_3b956da36b', 'finish_reason': 'stop', 'logprobs': None}, id='run-98b765b3-f1a6-4c9a-ad0f-2db7950b900f-0')]}}\n",
|
||||
"{'agent': {'messages': [AIMessage(content='Common ways of task decomposition include:\\n1. Using LLM with simple prompting like \"Steps for XYZ\" or \"What are the subgoals for achieving XYZ?\"\\n2. Using task-specific instructions, for example, \"Write a story outline\" for writing a novel.\\n3. Involving human inputs in the task decomposition process.', response_metadata={'token_usage': {'completion_tokens': 67, 'prompt_tokens': 1339, 'total_tokens': 1406}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-9ad14cde-ca75-4238-a868-f865e0fc50dd-0')]}}\n",
|
||||
"----\n"
|
||||
]
|
||||
}
|
||||
@@ -852,20 +879,15 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"execution_count": 23,
|
||||
"id": "b1d2b4d4-e604-497d-873d-d345b808578e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import bs4\n",
|
||||
"from langchain.agents import AgentExecutor, create_tool_calling_agent\n",
|
||||
"from langchain.tools.retriever import create_retriever_tool\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.chat_message_histories import ChatMessageHistory\n",
|
||||
"from langchain_community.document_loaders import WebBaseLoader\n",
|
||||
"from langchain_core.chat_history import BaseChatMessageHistory\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
|
||||
"from langchain_core.runnables.history import RunnableWithMessageHistory\n",
|
||||
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
|
||||
"from langgraph.checkpoint.sqlite import SqliteSaver\n",
|
||||
@@ -900,9 +922,7 @@
|
||||
"tools = [tool]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"agent_executor = chat_agent_executor.create_tool_calling_executor(\n",
|
||||
" llm, tools, checkpointer=memory\n",
|
||||
")"
|
||||
"agent_executor = create_react_agent(llm, tools, checkpointer=memory)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -941,7 +961,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.11.2"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -1,5 +1,19 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "52976910",
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "raw"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"---\n",
|
||||
"keywords: [recursivecharactertextsplitter]\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a678d550",
|
||||
|
||||
@@ -2,11 +2,14 @@
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "raw"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_position: 0\n",
|
||||
"keywords: [Runnable, Runnables, LCEL]\n",
|
||||
"keywords: [Runnable, Runnables, RunnableSequence, LCEL, chain, chains, chaining]\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -3,10 +3,14 @@
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "0bdb3b97-4989-4237-b43b-5943dbbd8302",
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "raw"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_position: 1.5\n",
|
||||
"keywords: [stream]\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -3,10 +3,15 @@
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "27598444",
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "raw"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_position: 3\n",
|
||||
"keywords: [structured output, json, information extraction, with_structured_output]\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -14,14 +14,20 @@
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
":::info\n",
|
||||
":::info Tool calling vs function calling\n",
|
||||
"\n",
|
||||
"We use the term tool calling interchangeably with function calling. Although\n",
|
||||
"function calling is sometimes meant to refer to invocations of a single function,\n",
|
||||
"we treat all models as though they can return multiple tool or function calls in \n",
|
||||
"each message.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
":::info Supported models\n",
|
||||
"\n",
|
||||
"You can find a [list of all models that support tool calling](/docs/integrations/chat/).\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Tool calling allows a chat model to respond to a given prompt by \"calling a tool\".\n",
|
||||
"While the name implies that the model is performing \n",
|
||||
|
||||
256
docs/docs/how_to/tool_runtime.ipynb
Normal file
@@ -0,0 +1,256 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to pass run time values to a tool\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"- [Chat models](/docs/concepts/#chat-models)\n",
|
||||
"- [LangChain Tools](/docs/concepts/#tools)\n",
|
||||
"- [How to create tools](/docs/how_to/custom_tools)\n",
|
||||
"- [How to use a model to call tools](https://python.langchain.com/v0.2/docs/how_to/tool_calling/)\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
":::{.callout-info} Supported models\n",
|
||||
"\n",
|
||||
"This how-to guide uses models with native tool calling capability.\n",
|
||||
"You can find a [list of all models that support tool calling](/docs/integrations/chat/).\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
":::{.callout-info} Using with LangGraph\n",
|
||||
"\n",
|
||||
"If you're using LangGraph, please refer to [this how-to guide](https://langchain-ai.github.io/langgraph/how-tos/pass-run-time-values-to-tools/)\n",
|
||||
"which shows how to create an agent that keeps track of a given user's favorite pets.\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"You may need to bind values to a tool that are only known at runtime. For example, the tool logic may require using the ID of the user who made the request.\n",
|
||||
"\n",
|
||||
"Most of the time, such values should not be controlled by the LLM. In fact, allowing the LLM to control the user ID may lead to a security risk.\n",
|
||||
"\n",
|
||||
"Instead, the LLM should only control the parameters of the tool that are meant to be controlled by the LLM, while other parameters (such as user ID) should be fixed by the application logic.\n",
|
||||
"\n",
|
||||
"This how-to guide shows a simple design pattern that creates the tool dynamically at run time and binds to them appropriate values."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can bind them to chat models as follows:\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
|
||||
"\n",
|
||||
"<ChatModelTabs\n",
|
||||
" customVarName=\"llm\"\n",
|
||||
" fireworksParams={`model=\"accounts/fireworks/models/firefunction-v1\", temperature=0`}\n",
|
||||
"/>\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.2.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.0\u001b[0m\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpython -m pip install --upgrade pip\u001b[0m\n",
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"%pip install -qU langchain langchain_openai\n",
|
||||
"\n",
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"if \"OPENAI_API_KEY\" not in os.environ:\n",
|
||||
" os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Passing request time information\n",
|
||||
"\n",
|
||||
"The idea is to create the tool dynamically at request time, and bind to it the appropriate information. For example,\n",
|
||||
"this information may be the user ID as resolved from the request itself."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"from langchain_core.output_parsers import JsonOutputParser\n",
|
||||
"from langchain_core.tools import BaseTool, tool"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"user_to_pets = {}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def generate_tools_for_user(user_id: str) -> List[BaseTool]:\n",
|
||||
" \"\"\"Generate a set of tools that have a user id associated with them.\"\"\"\n",
|
||||
"\n",
|
||||
" @tool\n",
|
||||
" def update_favorite_pets(pets: List[str]) -> None:\n",
|
||||
" \"\"\"Add the list of favorite pets.\"\"\"\n",
|
||||
" user_to_pets[user_id] = pets\n",
|
||||
"\n",
|
||||
" @tool\n",
|
||||
" def delete_favorite_pets() -> None:\n",
|
||||
" \"\"\"Delete the list of favorite pets.\"\"\"\n",
|
||||
" if user_id in user_to_pets:\n",
|
||||
" del user_to_pets[user_id]\n",
|
||||
"\n",
|
||||
" @tool\n",
|
||||
" def list_favorite_pets() -> None:\n",
|
||||
" \"\"\"List favorite pets if any.\"\"\"\n",
|
||||
" return user_to_pets.get(user_id, [])\n",
|
||||
"\n",
|
||||
" return [update_favorite_pets, delete_favorite_pets, list_favorite_pets]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Verify that the tools work correctly"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'eugene': ['cat', 'dog']}\n",
|
||||
"['cat', 'dog']\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"update_pets, delete_pets, list_pets = generate_tools_for_user(\"eugene\")\n",
|
||||
"update_pets.invoke({\"pets\": [\"cat\", \"dog\"]})\n",
|
||||
"print(user_to_pets)\n",
|
||||
"print(list_pets.invoke({}))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def handle_run_time_request(user_id: str, query: str):\n",
|
||||
" \"\"\"Handle run time request.\"\"\"\n",
|
||||
" tools = generate_tools_for_user(user_id)\n",
|
||||
" llm_with_tools = llm.bind_tools(tools)\n",
|
||||
" prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [(\"system\", \"You are a helpful assistant.\")],\n",
|
||||
" )\n",
|
||||
" chain = prompt | llm_with_tools\n",
|
||||
" return llm_with_tools.invoke(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This code will allow the LLM to invoke the tools, but the LLM is **unaware** of the fact that a **user ID** even exists!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'update_favorite_pets',\n",
|
||||
" 'args': {'pets': ['cats', 'parrots']},\n",
|
||||
" 'id': 'call_jJvjPXsNbFO5MMgW0q84iqCN'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"ai_message = handle_run_time_request(\n",
|
||||
" \"eugene\", \"my favorite animals are cats and parrots.\"\n",
|
||||
")\n",
|
||||
"ai_message.tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
":::{.callout-important}\n",
|
||||
"\n",
|
||||
"Chat models only output requests to invoke tools, they don't actually invoke the underlying tools.\n",
|
||||
"\n",
|
||||
"To see how to invoke the tools, please refer to [how to use a model to call tools](https://python.langchain.com/v0.2/docs/how_to/tool_calling/).\n",
|
||||
":::"
|
||||
]
|
||||
}
|
||||
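For completeness, a hedged sketch of that follow-up step using only the objects defined above (this loop is illustrative and not taken from the referenced guide):

```python
# Minimal sketch (assumes `generate_tools_for_user` and `ai_message` from above):
# look up each requested tool by name and invoke it with the model-generated args.
user_tools = {t.name: t for t in generate_tools_for_user("eugene")}

for tool_call in ai_message.tool_calls:
    user_tools[tool_call["name"]].invoke(tool_call["args"])

print(user_to_pets)  # expected: {'eugene': ['cats', 'parrots']}
```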
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -110,7 +110,7 @@ with identify("user-123"):
|
||||
llm.invoke("Tell me a joke")
|
||||
|
||||
with identify("user-456", user_props={"email": "user456@test.com"}):
|
||||
agen.run("Who is Leo DiCaprio's girlfriend?")
|
||||
agent.run("Who is Leo DiCaprio's girlfriend?")
|
||||
```
|
||||
## Support
|
||||
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -137,6 +137,77 @@
|
||||
"for chunk in chat.stream(messages):\n",
|
||||
" print(chunk.content, end=\"\", flush=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c36575b3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### LLM Caching with OpenSearch Semantic Cache\n",
|
||||
"\n",
|
||||
"Use OpenSearch as a semantic cache to cache prompts and responses and evaluate hits based on semantic similarity.\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "375d4e56",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.globals import set_llm_cache\n",
|
||||
"from langchain_aws import BedrockEmbeddings, ChatBedrock\n",
|
||||
"from langchain_community.cache import OpenSearchSemanticCache\n",
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"\n",
|
||||
"bedrock_embeddings = BedrockEmbeddings(\n",
|
||||
" model_id=\"amazon.titan-embed-text-v1\", region_name=\"us-east-1\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chat = ChatBedrock(\n",
|
||||
" model_id=\"anthropic.claude-3-haiku-20240307-v1:0\", model_kwargs={\"temperature\": 0.5}\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Enable LLM cache. Make sure OpenSearch is set up and running. Update URL accordingly.\n",
|
||||
"set_llm_cache(\n",
|
||||
" OpenSearchSemanticCache(\n",
|
||||
" opensearch_url=\"http://localhost:9200\", embedding=bedrock_embeddings\n",
|
||||
" )\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "bb5d25bb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"# The first time, it is not yet in cache, so it should take longer\n",
|
||||
"messages = [HumanMessage(content=\"tell me about Amazon Bedrock\")]\n",
|
||||
"response_text = chat.invoke(messages)\n",
|
||||
"\n",
|
||||
"print(response_text)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "6cfb3086",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"# The second time, while not a direct hit, the question is semantically similar to the original question,\n",
|
||||
"# so it uses the cached result!\n",
|
||||
"\n",
|
||||
"messages = [HumanMessage(content=\"what is amazon bedrock\")]\n",
|
||||
"response_text = chat.invoke(messages)\n",
|
||||
"\n",
|
||||
"print(response_text)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
@@ -246,11 +246,220 @@
|
||||
"source": [
|
||||
"chain.invoke({\"product\": \"healthy snacks\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Tools\n",
|
||||
"\n",
|
||||
"### bind_tools()\n",
|
||||
"\n",
|
||||
"With `ChatEdenAI.bind_tools`, we can easily pass in Pydantic classes, dict schemas, LangChain tools, or even functions as tools to the model."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
|
||||
"\n",
|
||||
"llm = ChatEdenAI(provider=\"openai\", temperature=0.2, max_tokens=500)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class GetWeather(BaseModel):\n",
|
||||
" \"\"\"Get the current weather in a given location\"\"\"\n",
|
||||
"\n",
|
||||
" location: str = Field(..., description=\"The city and state, e.g. San Francisco, CA\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"llm_with_tools = llm.bind_tools([GetWeather])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='', response_metadata={'openai': {'status': 'success', 'generated_text': None, 'message': [{'role': 'user', 'message': 'what is the weather like in San Francisco', 'tools': [{'name': 'GetWeather', 'description': 'Get the current weather in a given location', 'parameters': {'type': 'object', 'properties': {'location': {'description': 'The city and state, e.g. San Francisco, CA', 'type': 'string'}}, 'required': ['location']}}], 'tool_calls': None}, {'role': 'assistant', 'message': None, 'tools': None, 'tool_calls': [{'id': 'call_tRpAO7KbQwgTjlka70mCQJdo', 'name': 'GetWeather', 'arguments': '{\"location\":\"San Francisco\"}'}]}], 'cost': 0.000194}}, id='run-5c44c01a-d7bb-4df6-835e-bda596080399-0', tool_calls=[{'name': 'GetWeather', 'args': {'location': 'San Francisco'}, 'id': 'call_tRpAO7KbQwgTjlka70mCQJdo'}])"
|
||||
]
|
||||
},
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"ai_msg = llm_with_tools.invoke(\n",
|
||||
" \"what is the weather like in San Francisco\",\n",
|
||||
")\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'GetWeather',\n",
|
||||
" 'args': {'location': 'San Francisco'},\n",
|
||||
" 'id': 'call_tRpAO7KbQwgTjlka70mCQJdo'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"ai_msg.tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### with_structured_output()\n",
|
||||
"\n",
|
||||
"The BaseChatModel.with_structured_output interface makes it easy to get structured output from chat models. You can use ChatEdenAI.with_structured_output, which uses tool-calling under the hood), to get the model to more reliably return an output in a specific format:\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"GetWeather(location='San Francisco')"
|
||||
]
|
||||
},
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"structured_llm = llm.with_structured_output(GetWeather)\n",
|
||||
"structured_llm.invoke(\n",
|
||||
" \"what is the weather like in San Francisco\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Passing Tool Results to model\n",
|
||||
"\n",
|
||||
"Here is a full example of how to use a tool. Pass the tool output to the model, and get the result back from the model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'11 + 11 = 22'"
|
||||
]
|
||||
},
|
||||
"execution_count": 19,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import HumanMessage, ToolMessage\n",
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def add(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Adds a and b.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" a: first int\n",
|
||||
" b: second int\n",
|
||||
" \"\"\"\n",
|
||||
" return a + b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"llm = ChatEdenAI(\n",
|
||||
" provider=\"openai\",\n",
|
||||
" max_tokens=1000,\n",
|
||||
" temperature=0.2,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"llm_with_tools = llm.bind_tools([add], tool_choice=\"required\")\n",
|
||||
"\n",
|
||||
"query = \"What is 11 + 11?\"\n",
|
||||
"\n",
|
||||
"messages = [HumanMessage(query)]\n",
|
||||
"ai_msg = llm_with_tools.invoke(messages)\n",
|
||||
"messages.append(ai_msg)\n",
|
||||
"\n",
|
||||
"tool_call = ai_msg.tool_calls[0]\n",
|
||||
"tool_output = add.invoke(tool_call[\"args\"])\n",
|
||||
"\n",
|
||||
"# This append the result from our tool to the model\n",
|
||||
"messages.append(ToolMessage(tool_output, tool_call_id=tool_call[\"id\"]))\n",
|
||||
"\n",
|
||||
"llm_with_tools.invoke(messages).content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Streaming\n",
|
||||
"\n",
|
||||
"Eden AI does not currently support streaming tool calls. Attempting to stream will yield a single final message."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/home/eden/Projects/edenai-langchain/libs/community/langchain_community/chat_models/edenai.py:603: UserWarning: stream: Tool use is not yet supported in streaming mode.\n",
|
||||
" warnings.warn(\"stream: Tool use is not yet supported in streaming mode.\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[AIMessageChunk(content='', id='run-fae32908-ec48-4ab2-ad96-bb0d0511754f', tool_calls=[{'name': 'add', 'args': {'a': 9, 'b': 9}, 'id': 'call_n0Tm7I9zERWa6UpxCAVCweLN'}], tool_call_chunks=[{'name': 'add', 'args': '{\"a\": 9, \"b\": 9}', 'id': 'call_n0Tm7I9zERWa6UpxCAVCweLN', 'index': 0}])]"
|
||||
]
|
||||
},
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"list(llm_with_tools.stream(\"What's 9 + 9\"))"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "langchain-pr",
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
|
||||
@@ -58,6 +58,62 @@
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### `HuggingFacePipeline`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_huggingface import HuggingFacePipeline\n",
|
||||
"\n",
|
||||
"llm = HuggingFacePipeline.from_model_id(\n",
|
||||
" model_id=\"HuggingFaceH4/zephyr-7b-beta\",\n",
|
||||
" task=\"text-generation\",\n",
|
||||
" pipeline_kwargs=dict(\n",
|
||||
" max_new_tokens=512,\n",
|
||||
" do_sample=False,\n",
|
||||
" repetition_penalty=1.03,\n",
|
||||
" ),\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To run a quantized version, you might specify a `bitsandbytes` quantization config as follows:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"from transformers import BitsAndBytesConfig\n",
|
||||
"\n",
|
||||
"quantization_config = BitsAndBytesConfig(\n",
|
||||
" load_in_4bit=True,\n",
|
||||
" bnb_4bit_quant_type=\"nf4\",\n",
|
||||
" bnb_4bit_compute_dtype=\"float16\",\n",
|
||||
" bnb_4bit_use_double_quant=True\n",
|
||||
")\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"and pass it to the `HuggingFacePipeline` as a part of its `model_kwargs`:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"pipeline = HuggingFacePipeline(\n",
|
||||
" ...\n",
|
||||
"\n",
|
||||
" model_kwargs={\"quantization_config\": quantization_config},\n",
|
||||
" \n",
|
||||
" ...\n",
|
||||
")\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
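Combining the two snippets, a complete sketch might look as follows; it simply reuses the `model_id` and generation parameters from the cell above and assumes a CUDA-capable GPU with the `bitsandbytes` package installed:

```python
# Hedged end-to-end sketch: load the model in 4-bit via bitsandbytes and wrap it
# in a HuggingFacePipeline (parameters reused from the cells above).
from langchain_huggingface import HuggingFacePipeline
from transformers import BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype="float16",
    bnb_4bit_use_double_quant=True,
)

llm = HuggingFacePipeline.from_model_id(
    model_id="HuggingFaceH4/zephyr-7b-beta",
    task="text-generation",
    model_kwargs={"quantization_config": quantization_config},
    pipeline_kwargs=dict(
        max_new_tokens=512,
        do_sample=False,
        repetition_penalty=1.03,
    ),
)
```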
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
|
||||
1056
docs/docs/integrations/chat/llamacpp.ipynb
Normal file
File diff suppressed because one or more lines are too long
@@ -54,12 +54,12 @@
|
||||
"\n",
|
||||
"Here are a few ways to interact with pulled local models\n",
|
||||
"\n",
|
||||
"#### directly in the terminal:\n",
|
||||
"#### In the terminal:\n",
|
||||
"\n",
|
||||
"* All of your local models are automatically served on `localhost:11434`\n",
|
||||
"* Run `ollama run <name-of-model>` to start interacting via the command line directly\n",
|
||||
"\n",
|
||||
"### via an API\n",
|
||||
"#### Via an API\n",
|
||||
"\n",
|
||||
"Send an `application/json` request to the API endpoint of Ollama to interact.\n",
|
||||
"\n",
|
||||
@@ -72,9 +72,11 @@
|
||||
"\n",
|
||||
"See the Ollama [API documentation](https://github.com/jmorganca/ollama/blob/main/docs/api.md) for all endpoints.\n",
|
||||
"\n",
|
||||
"#### via LangChain\n",
|
||||
"#### Via LangChain\n",
|
||||
"\n",
|
||||
"See a typical basic example of using Ollama via the `ChatOllama` chat model in your LangChain application."
|
||||
"See a typical basic example of using Ollama via the `ChatOllama` chat model in your LangChain application. \n",
|
||||
"\n",
|
||||
"View the [API Reference for ChatOllama](https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.ollama.ChatOllama.html#langchain_community.chat_models.ollama.ChatOllama) for more."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -105,7 +107,7 @@
|
||||
"\n",
|
||||
"# using LangChain Expressive Language chain syntax\n",
|
||||
"# learn more about the LCEL on\n",
|
||||
"# /docs/expression_language/why\n",
|
||||
"# /docs/concepts/#langchain-expression-language-lcel\n",
|
||||
"chain = prompt | llm | StrOutputParser()\n",
|
||||
"\n",
|
||||
"# for brevity, response is printed in terminal\n",
|
||||
@@ -189,7 +191,7 @@
|
||||
"\n",
|
||||
"## Building from source\n",
|
||||
"\n",
|
||||
"For up to date instructions on building from source, check the Ollama documentation on [Building from Source](https://github.com/jmorganca/ollama?tab=readme-ov-file#building)"
|
||||
"For up to date instructions on building from source, check the Ollama documentation on [Building from Source](https://github.com/ollama/ollama?tab=readme-ov-file#building)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -333,7 +335,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"pip install --upgrade --quiet pillow"
|
||||
"!pip install --upgrade --quiet pillow"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -444,6 +446,24 @@
|
||||
"\n",
|
||||
"print(query_chain)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Concurrency Features\n",
|
||||
"\n",
|
||||
"Ollama supports concurrency inference for a single model, and or loading multiple models simulatenously (at least [version 0.1.33](https://github.com/ollama/ollama/releases)).\n",
|
||||
"\n",
|
||||
"Start the Ollama server with:\n",
|
||||
"\n",
|
||||
"* `OLLAMA_NUM_PARALLEL`: Handle multiple requests simultaneously for a single model\n",
|
||||
"* `OLLAMA_MAX_LOADED_MODELS`: Load multiple models simultaneously\n",
|
||||
"\n",
|
||||
"Example: `OLLAMA_NUM_PARALLEL=4 OLLAMA_MAX_LOADED_MODELS=4 ollama serve`\n",
|
||||
"\n",
|
||||
"Learn more about configuring Ollama server in [the official guide](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-do-i-configure-ollama-server)."
|
||||
]
|
||||
}
|
||||
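As a hedged client-side illustration (assuming the server was started with the flags above and that the `llama3` and `mistral` models have already been pulled), several requests can then be issued concurrently from LangChain:

```python
# Minimal sketch: fire several requests at once; with OLLAMA_NUM_PARALLEL and
# OLLAMA_MAX_LOADED_MODELS set, the server handles them concurrently.
import asyncio

from langchain_community.chat_models import ChatOllama

llama = ChatOllama(model="llama3")
mistral = ChatOllama(model="mistral")


async def main() -> None:
    replies = await asyncio.gather(
        llama.ainvoke("Give me one fact about llamas."),
        llama.ainvoke("Give me one fact about alpacas."),
        mistral.ainvoke("Give me one fact about the mistral wind."),
    )
    for reply in replies:
        print(reply.content)


asyncio.run(main())
```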
],
|
||||
"metadata": {
|
||||
|
||||
@@ -41,7 +41,7 @@
|
||||
"| [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) | [langchain-openai](https://api.python.langchain.com/en/latest/openai_api_reference.html) | ❌ | beta | ✅ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling/) | [Structured output](/docs/how_to/structured_output/) | JSON mode | Image input | Audio input | Video input | [Native streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling/) | [Structured output](/docs/how_to/structured_output/) | JSON mode | Image input | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ | \n",
|
||||
"\n",
|
||||
|
||||
@@ -179,10 +179,69 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"> If you are going to place system prompt here, then it will override your system prompt that was fixed while deploying the application from the platform. \n",
|
||||
"> If you are going to place system prompt here, then it will override your system prompt that was fixed while deploying the application from the platform. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Native RAG Support with Prem Repositories\n",
|
||||
"\n",
|
||||
"> Please note that the current version of ChatPremAI does not support parameters: [n](https://platform.openai.com/docs/api-reference/chat/create#chat-create-n) and [stop](https://platform.openai.com/docs/api-reference/chat/create#chat-create-stop). \n",
|
||||
"Prem Repositories which allows users to upload documents (.txt, .pdf etc) and connect those repositories to the LLMs. You can think Prem repositories as native RAG, where each repository can be considered as a vector database. You can connect multiple repositories. You can learn more about repositories [here](https://docs.premai.io/get-started/repositories).\n",
|
||||
"\n",
|
||||
"Repositories are also supported in langchain premai. Here is how you can do it. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"what is the diameter of individual Galaxy\"\n",
|
||||
"repository_ids = [\n",
|
||||
" 1991,\n",
|
||||
"]\n",
|
||||
"repositories = dict(ids=repository_ids, similarity_threshold=0.3, limit=3)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"First we start by defining our repository with some repository ids. Make sure that the ids are valid repository ids. You can learn more about how to get the repository id [here](https://docs.premai.io/get-started/repositories). \n",
|
||||
"\n",
|
||||
"> Please note: Similar like `model_name` when you invoke the argument `repositories`, then you are potentially overriding the repositories connected in the launchpad. \n",
|
||||
"\n",
|
||||
"Now, we connect the repository with our chat object to invoke RAG based generations. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"\n",
|
||||
"response = chat.invoke(query, max_tokens=100, repositories=repositories)\n",
|
||||
"\n",
|
||||
"print(response.content)\n",
|
||||
"print(json.dumps(response.response_metadata, indent=4))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"> Ideally, you do not need to connect Repository IDs here to get Retrieval Augmented Generations. You can still get the same result if you have connected the repositories in prem platform. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Streaming\n",
|
||||
"\n",
|
||||
"In this section, let's see how we can stream tokens using langchain and PremAI. Here's how you do it. "
|
||||
|
||||
@@ -47,7 +47,8 @@
|
||||
"source": [
|
||||
"api_key = \"xxx\"\n",
|
||||
"base_id = \"xxx\"\n",
|
||||
"table_id = \"xxx\""
|
||||
"table_id = \"xxx\"\n",
|
||||
"view = \"xxx\" # optional"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -57,7 +58,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = AirtableLoader(api_key, table_id, base_id)\n",
|
||||
"loader = AirtableLoader(api_key, table_id, base_id, view=view)\n",
|
||||
"docs = loader.load()"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -48,7 +48,7 @@
|
||||
"from langchain_community.document_loaders import AsyncChromiumLoader\n",
|
||||
"\n",
|
||||
"urls = [\"https://www.wsj.com\"]\n",
|
||||
"loader = AsyncChromiumLoader(urls)\n",
|
||||
"loader = AsyncChromiumLoader(urls, user_agent=\"MyAppUserAgent\")\n",
|
||||
"docs = loader.load()\n",
|
||||
"docs[0].page_content[0:100]"
|
||||
]
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
"\n",
|
||||
">[Jupyter Notebook](https://en.wikipedia.org/wiki/Project_Jupyter#Applications) (formerly `IPython Notebook`) is a web-based interactive computational environment for creating notebook documents.\n",
|
||||
"\n",
|
||||
"This notebook covers how to load data from a `Jupyter notebook (.html)` into a format suitable by LangChain."
|
||||
"This notebook covers how to load data from a `Jupyter notebook (.ipynb)` into a format suitable by LangChain."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -31,7 +31,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = NotebookLoader(\n",
|
||||
" \"example_data/notebook.html\",\n",
|
||||
" \"example_data/notebook.ipynb\",\n",
|
||||
" include_outputs=True,\n",
|
||||
" max_output_length=20,\n",
|
||||
" remove_newline=True,\n",
|
||||
@@ -42,7 +42,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"`NotebookLoader.load()` loads the `.html` notebook file into a `Document` object.\n",
|
||||
"`NotebookLoader.load()` loads the `.ipynb` notebook file into a `Document` object.\n",
|
||||
"\n",
|
||||
"**Parameters**:\n",
|
||||
"\n",
|
||||
|
||||
@@ -0,0 +1,387 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# DashScope Reranker\n",
|
||||
"\n",
|
||||
"This notebook shows how to use DashScope Reranker for document compression and retrieval. [DashScope](https://dashscope.aliyun.com/) is the generative AI service from Alibaba Cloud (Aliyun).\n",
|
||||
"\n",
|
||||
"DashScope's [Text ReRank Model](https://help.aliyun.com/document_detail/2780058.html?spm=a2c4g.2780059.0.0.6d995024FlrJ12) supports reranking documents with a maximum of 4000 tokens. Moreover, it supports Chinese, English, Japanese, Korean, Thai, Spanish, French, Portuguese, Indonesian, Arabic, and over 50 other languages. For more details, please visit [here](https://help.aliyun.com/document_detail/2780059.html?spm=a2c4g.2780058.0.0.3a9e5b1dWeOQjI)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet dashscope"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet faiss\n",
|
||||
"\n",
|
||||
"# OR (depending on Python version)\n",
|
||||
"\n",
|
||||
"%pip install --upgrade --quiet faiss-cpu"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# To create api key: https://bailian.console.aliyun.com/?apiKey=1#/api-key\n",
|
||||
"\n",
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"DASHSCOPE_API_KEY\"] = getpass.getpass(\"DashScope API Key:\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Helper function for printing docs\n",
|
||||
"def pretty_print_docs(docs):\n",
|
||||
" print(\n",
|
||||
" f\"\\n{'-' * 100}\\n\".join(\n",
|
||||
" [f\"Document {i+1}:\\n\\n\" + d.page_content for i, d in enumerate(docs)]\n",
|
||||
" )\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Set up the base vector store retriever\n",
|
||||
"Let's start by initializing a simple vector store retriever and storing the 2023 State of the Union speech (in chunks). We can set up the retriever to retrieve a high number (20) of docs."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Document 1:\n",
|
||||
"\n",
|
||||
"I understand. \n",
|
||||
"\n",
|
||||
"I remember when my Dad had to leave our home in Scranton, Pennsylvania to find work. I grew up in a family where if the price of food went up, you felt it. \n",
|
||||
"\n",
|
||||
"That’s why one of the first things I did as President was fight to pass the American Rescue Plan. \n",
|
||||
"\n",
|
||||
"Because people were hurting. We needed to act, and we did. \n",
|
||||
"\n",
|
||||
"Few pieces of legislation have done more in a critical moment in our history to lift us out of crisis.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 2:\n",
|
||||
"\n",
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 3:\n",
|
||||
"\n",
|
||||
"To all Americans, I will be honest with you, as I’ve always promised. A Russian dictator, invading a foreign country, has costs around the world. \n",
|
||||
"\n",
|
||||
"And I’m taking robust action to make sure the pain of our sanctions is targeted at Russia’s economy. And I will use every tool at our disposal to protect American businesses and consumers. \n",
|
||||
"\n",
|
||||
"Tonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 4:\n",
|
||||
"\n",
|
||||
"We cannot let this happen. \n",
|
||||
"\n",
|
||||
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
|
||||
"\n",
|
||||
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 5:\n",
|
||||
"\n",
|
||||
"Tonight I say to the Russian oligarchs and corrupt leaders who have bilked billions of dollars off this violent regime no more. \n",
|
||||
"\n",
|
||||
"The U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs. \n",
|
||||
"\n",
|
||||
"We are joining with our European allies to find and seize your yachts your luxury apartments your private jets. We are coming for your ill-begotten gains.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 6:\n",
|
||||
"\n",
|
||||
"Every Administration says they’ll do it, but we are actually doing it. \n",
|
||||
"\n",
|
||||
"We will buy American to make sure everything from the deck of an aircraft carrier to the steel on highway guardrails are made in America. \n",
|
||||
"\n",
|
||||
"But to compete for the best jobs of the future, we also need to level the playing field with China and other competitors.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 7:\n",
|
||||
"\n",
|
||||
"When we invest in our workers, when we build the economy from the bottom up and the middle out together, we can do something we haven’t done in a long time: build a better America. \n",
|
||||
"\n",
|
||||
"For more than two years, COVID-19 has impacted every decision in our lives and the life of the nation. \n",
|
||||
"\n",
|
||||
"And I know you’re tired, frustrated, and exhausted. \n",
|
||||
"\n",
|
||||
"But I also know this.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 8:\n",
|
||||
"\n",
|
||||
"A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \n",
|
||||
"\n",
|
||||
"And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 9:\n",
|
||||
"\n",
|
||||
"My plan will not only lower costs to give families a fair shot, it will lower the deficit. \n",
|
||||
"\n",
|
||||
"The previous Administration not only ballooned the deficit with tax cuts for the very wealthy and corporations, it undermined the watchdogs whose job was to keep pandemic relief funds from being wasted. \n",
|
||||
"\n",
|
||||
"But in my administration, the watchdogs have been welcomed back. \n",
|
||||
"\n",
|
||||
"We’re going after the criminals who stole billions in relief money meant for small businesses and millions of Americans.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 10:\n",
|
||||
"\n",
|
||||
"He will never extinguish their love of freedom. He will never weaken the resolve of the free world. \n",
|
||||
"\n",
|
||||
"We meet tonight in an America that has lived through two of the hardest years this nation has ever faced. \n",
|
||||
"\n",
|
||||
"The pandemic has been punishing. \n",
|
||||
"\n",
|
||||
"And so many families are living paycheck to paycheck, struggling to keep up with the rising cost of food, gas, housing, and so much more. \n",
|
||||
"\n",
|
||||
"I understand.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 11:\n",
|
||||
"\n",
|
||||
"And tonight, I’m announcing that the Justice Department will name a chief prosecutor for pandemic fraud. \n",
|
||||
"\n",
|
||||
"By the end of this year, the deficit will be down to less than half what it was before I took office. \n",
|
||||
"\n",
|
||||
"The only president ever to cut the deficit by more than one trillion dollars in a single year. \n",
|
||||
"\n",
|
||||
"Lowering your costs also means demanding more competition. \n",
|
||||
"\n",
|
||||
"I’m a capitalist, but capitalism without competition isn’t capitalism. \n",
|
||||
"\n",
|
||||
"It’s exploitation—and it drives up prices.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 12:\n",
|
||||
"\n",
|
||||
"Let each of us here tonight in this Chamber send an unmistakable signal to Ukraine and to the world. \n",
|
||||
"\n",
|
||||
"Please rise if you are able and show that, Yes, we the United States of America stand with the Ukrainian people. \n",
|
||||
"\n",
|
||||
"Throughout our history we’ve learned this lesson when dictators do not pay a price for their aggression they cause more chaos. \n",
|
||||
"\n",
|
||||
"They keep moving. \n",
|
||||
"\n",
|
||||
"And the costs and the threats to America and the world keep rising.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 13:\n",
|
||||
"\n",
|
||||
"Cancer is the #2 cause of death in America–second only to heart disease. \n",
|
||||
"\n",
|
||||
"Last month, I announced our plan to supercharge \n",
|
||||
"the Cancer Moonshot that President Obama asked me to lead six years ago. \n",
|
||||
"\n",
|
||||
"Our goal is to cut the cancer death rate by at least 50% over the next 25 years, turn more cancers from death sentences into treatable diseases. \n",
|
||||
"\n",
|
||||
"More support for patients and families. \n",
|
||||
"\n",
|
||||
"To get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 14:\n",
|
||||
"\n",
|
||||
"It fueled our efforts to vaccinate the nation and combat COVID-19. It delivered immediate economic relief for tens of millions of Americans. \n",
|
||||
"\n",
|
||||
"Helped put food on their table, keep a roof over their heads, and cut the cost of health insurance. \n",
|
||||
"\n",
|
||||
"And as my Dad used to say, it gave people a little breathing room.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 15:\n",
|
||||
"\n",
|
||||
"America will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. \n",
|
||||
"\n",
|
||||
"These steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. \n",
|
||||
"\n",
|
||||
"But I want you to know that we are going to be okay. \n",
|
||||
"\n",
|
||||
"When the history of this era is written Putin’s war on Ukraine will have left Russia weaker and the rest of the world stronger.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 16:\n",
|
||||
"\n",
|
||||
"So that’s my plan. It will grow the economy and lower costs for families. \n",
|
||||
"\n",
|
||||
"So what are we waiting for? Let’s get this done. And while you’re at it, confirm my nominees to the Federal Reserve, which plays a critical role in fighting inflation. \n",
|
||||
"\n",
|
||||
"My plan will not only lower costs to give families a fair shot, it will lower the deficit.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 17:\n",
|
||||
"\n",
|
||||
"And we will, as one people. \n",
|
||||
"\n",
|
||||
"One America. \n",
|
||||
"\n",
|
||||
"The United States of America. \n",
|
||||
"\n",
|
||||
"May God bless you all. May God protect our troops.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 18:\n",
|
||||
"\n",
|
||||
"As I’ve told Xi Jinping, it is never a good bet to bet against the American people. \n",
|
||||
"\n",
|
||||
"We’ll create good jobs for millions of Americans, modernizing roads, airports, ports, and waterways all across America. \n",
|
||||
"\n",
|
||||
"And we’ll do it all to withstand the devastating effects of the climate crisis and promote environmental justice.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 19:\n",
|
||||
"\n",
|
||||
"And I know you’re tired, frustrated, and exhausted. \n",
|
||||
"\n",
|
||||
"But I also know this. \n",
|
||||
"\n",
|
||||
"Because of the progress we’ve made, because of your resilience and the tools we have, tonight I can say \n",
|
||||
"we are moving forward safely, back to more normal routines. \n",
|
||||
"\n",
|
||||
"We’ve reached a new moment in the fight against COVID-19, with severe cases down to a level not seen since last July. \n",
|
||||
"\n",
|
||||
"Just a few days ago, the Centers for Disease Control and Prevention—the CDC—issued new mask guidelines.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 20:\n",
|
||||
"\n",
|
||||
"Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n",
|
||||
"\n",
|
||||
"Last year COVID-19 kept us apart. This year we are finally together again. \n",
|
||||
"\n",
|
||||
"Tonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n",
|
||||
"\n",
|
||||
"With a duty to one another to the American people to the Constitution. \n",
|
||||
"\n",
|
||||
"And with an unwavering resolve that freedom will always triumph over tyranny.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import TextLoader\n",
|
||||
"from langchain_community.embeddings.dashscope import DashScopeEmbeddings\n",
|
||||
"from langchain_community.vectorstores.faiss import FAISS\n",
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
|
||||
"\n",
|
||||
"documents = TextLoader(\"../../how_to/state_of_the_union.txt\").load()\n",
|
||||
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)\n",
|
||||
"texts = text_splitter.split_documents(documents)\n",
|
||||
"retriever = FAISS.from_documents(texts, DashScopeEmbeddings()).as_retriever( # type: ignore\n",
|
||||
" search_kwargs={\"k\": 20}\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"docs = retriever.invoke(query)\n",
|
||||
"pretty_print_docs(docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Reranking with DashScopeRerank\n",
|
||||
"Now let's wrap our base retriever with a `ContextualCompressionRetriever`. We'll use the `DashScopeRerank` to rerank the returned results."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Document 1:\n",
|
||||
"\n",
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 2:\n",
|
||||
"\n",
|
||||
"Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n",
|
||||
"\n",
|
||||
"Last year COVID-19 kept us apart. This year we are finally together again. \n",
|
||||
"\n",
|
||||
"Tonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n",
|
||||
"\n",
|
||||
"With a duty to one another to the American people to the Constitution. \n",
|
||||
"\n",
|
||||
"And with an unwavering resolve that freedom will always triumph over tyranny.\n",
|
||||
"----------------------------------------------------------------------------------------------------\n",
|
||||
"Document 3:\n",
|
||||
"\n",
|
||||
"Tonight I say to the Russian oligarchs and corrupt leaders who have bilked billions of dollars off this violent regime no more. \n",
|
||||
"\n",
|
||||
"The U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs. \n",
|
||||
"\n",
|
||||
"We are joining with our European allies to find and seize your yachts your luxury apartments your private jets. We are coming for your ill-begotten gains.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.retrievers import ContextualCompressionRetriever\n",
|
||||
"from langchain_community.document_compressors.dashscope_rerank import DashScopeRerank\n",
|
||||
"\n",
|
||||
"compressor = DashScopeRerank()\n",
|
||||
"compression_retriever = ContextualCompressionRetriever(\n",
|
||||
" base_compressor=compressor, base_retriever=retriever\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"compressed_docs = compression_retriever.invoke(\n",
|
||||
" \"What did the president say about Ketanji Jackson Brown\"\n",
|
||||
")\n",
|
||||
"pretty_print_docs(compressed_docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -9,8 +9,7 @@
|
||||
"\n",
|
||||
">[Diffbot](https://docs.diffbot.com/docs/getting-started-with-diffbot) is a suite of ML-based products that make it easy to structure web data.\n",
|
||||
">\n",
|
||||
">Diffbot's [Natural Language Processing API](https://www.diffbot.com/products/natural-language/) allows for the extraction of entities, relationships, and semantic meaning from unstructured text data.",
|
||||
"\n",
|
||||
">Diffbot's [Natural Language Processing API](https://www.diffbot.com/products/natural-language/) allows for the extraction of entities, relationships, and semantic meaning from unstructured text data.\n",
|
||||
"[](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/integrations/graphs/diffbot.ipynb)\n",
|
||||
"\n",
|
||||
"## Use case\n",
|
||||
@@ -70,8 +69,8 @@
|
||||
"source": [
|
||||
"from langchain_experimental.graph_transformers.diffbot import DiffbotGraphTransformer\n",
|
||||
"\n",
|
||||
"diffbot_api_token = \"DIFFBOT_API_TOKEN\"\n",
|
||||
"diffbot_nlp = DiffbotGraphTransformer(diffbot_api_token=diffbot_api_token)"
|
||||
"diffbot_api_key = \"DIFFBOT_KEY\"\n",
|
||||
"diffbot_nlp = DiffbotGraphTransformer(diffbot_api_key=diffbot_api_key)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -111,7 +110,7 @@
|
||||
" --name neo4j \\\n",
|
||||
" -p 7474:7474 -p 7687:7687 \\\n",
|
||||
" -d \\\n",
|
||||
" -e NEO4J_AUTH=neo4j/pleaseletmein \\\n",
|
||||
" -e NEO4J_AUTH=neo4j/password \\\n",
|
||||
" -e NEO4J_PLUGINS=\\[\\\"apoc\\\"\\] \\\n",
|
||||
" neo4j:latest\n",
|
||||
"``` \n",
|
||||
@@ -129,7 +128,7 @@
|
||||
"\n",
|
||||
"url = \"bolt://localhost:7687\"\n",
|
||||
"username = \"neo4j\"\n",
|
||||
"password = \"pleaseletmein\"\n",
|
||||
"password = \"password\"\n",
|
||||
"\n",
|
||||
"graph = Neo4jGraph(url=url, username=username, password=password)"
|
||||
]
|
||||
@@ -296,7 +295,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.12"
|
||||
"version": "3.9.18"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"id": "f36d938c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# LLM Caching integrations\n",
|
||||
"# Model caches\n",
|
||||
"\n",
|
||||
"This notebook covers how to cache results of individual LLM calls using different caches."
|
||||
]
|
||||
@@ -121,6 +121,28 @@
|
||||
"print(chain.invoke({\"question\": question}))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b4a31db5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To get response without prompt, you can bind `skip_prompt=True` with LLM."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "5e4aaad2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = prompt | hf.bind(skip_prompt=True)\n",
|
||||
"\n",
|
||||
"question = \"What is electroencephalography?\"\n",
|
||||
"\n",
|
||||
"print(chain.invoke({\"question\": question}))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "dbbc3a37",
|
||||
|
||||
@@ -12,16 +12,15 @@
|
||||
"\n",
|
||||
"It optimizes setup and configuration details, including GPU usage.\n",
|
||||
"\n",
|
||||
"For a complete list of supported models and model variants, see the [Ollama model library](https://github.com/jmorganca/ollama#model-library).\n",
|
||||
"For a complete list of supported models and model variants, see the [Ollama model library](https://github.com/ollama/ollama#model-library).\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"First, follow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance:\n",
|
||||
"First, follow [these instructions](https://github.com/ollama/ollama) to set up and run a local Ollama instance:\n",
|
||||
"\n",
|
||||
"* [Download](https://ollama.ai/download) and install Ollama onto the available supported platforms (including Windows Subsystem for Linux)\n",
|
||||
"* Fetch available LLM model via `ollama pull <name-of-model>`\n",
|
||||
" * View a list of available models via the [model library](https://ollama.ai/library)\n",
|
||||
" * e.g., `ollama pull llama3`\n",
|
||||
" * View a list of available models via the [model library](https://ollama.ai/library) and pull to use locally with the command `ollama pull llama3`\n",
|
||||
"* This will download the default tagged version of the model. Typically, the default points to the latest, smallest sized-parameter model.\n",
|
||||
"\n",
|
||||
"> On Mac, the models will be download to `~/.ollama/models`\n",
|
||||
@@ -29,28 +28,29 @@
|
||||
"> On Linux (or WSL), the models will be stored at `/usr/share/ollama/.ollama/models`\n",
|
||||
"\n",
|
||||
"* Specify the exact version of the model of interest as such `ollama pull vicuna:13b-v1.5-16k-q4_0` (View the [various tags for the `Vicuna`](https://ollama.ai/library/vicuna/tags) model in this instance)\n",
|
||||
"* To view all pulled models, use `ollama list`\n",
|
||||
"* To view all pulled models on your local instance, use `ollama list`\n",
|
||||
"* To chat directly with a model from the command line, use `ollama run <name-of-model>`\n",
|
||||
"* View the [Ollama documentation](https://github.com/jmorganca/ollama) for more commands. Run `ollama help` in the terminal to see available commands too.\n",
|
||||
"* View the [Ollama documentation](https://github.com/ollama/ollama) for more commands. \n",
|
||||
"* Run `ollama help` in the terminal to see available commands too.\n",
|
||||
"\n",
|
||||
"## Usage\n",
|
||||
"\n",
|
||||
"You can see a full list of supported parameters on the [API reference page](https://api.python.langchain.com/en/latest/llms/langchain.llms.ollama.Ollama.html).\n",
|
||||
"You can see a full list of supported parameters on the [API reference page](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.ollama.Ollama.html).\n",
|
||||
"\n",
|
||||
"If you are using a LLaMA `chat` model (e.g., `ollama pull llama3`) then you can use the `ChatOllama` interface.\n",
|
||||
"If you are using a LLaMA `chat` model (e.g., `ollama pull llama3`) then you can use the `ChatOllama` [interface](https://python.langchain.com/v0.2/docs/integrations/chat/ollama/).\n",
|
||||
"\n",
|
||||
"This includes [special tokens](https://huggingface.co/blog/llama2#how-to-prompt-llama-2) for system message and user input.\n",
|
||||
"This includes [special tokens](https://ollama.com/library/llama3) for system message and user input.\n",
|
||||
"\n",
|
||||
"## Interacting with Models \n",
|
||||
"\n",
|
||||
"Here are a few ways to interact with pulled local models\n",
|
||||
"\n",
|
||||
"#### directly in the terminal:\n",
|
||||
"#### In the terminal:\n",
|
||||
"\n",
|
||||
"* All of your local models are automatically served on `localhost:11434`\n",
|
||||
"* Run `ollama run <name-of-model>` to start interacting via the command line directly\n",
|
||||
"\n",
|
||||
"### via an API\n",
|
||||
"#### Via the API\n",
|
||||
"\n",
|
||||
"Send an `application/json` request to the API endpoint of Ollama to interact.\n",
|
||||
"\n",
|
||||
@@ -61,11 +61,20 @@
|
||||
"}'\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"See the Ollama [API documentation](https://github.com/jmorganca/ollama/blob/main/docs/api.md) for all endpoints.\n",
|
||||
"See the Ollama [API documentation](https://github.com/ollama/ollama/blob/main/docs/api.md) for all endpoints.\n",
|
||||
"\n",
|
||||
"#### via LangChain\n",
|
||||
"\n",
|
||||
"See a typical basic example of using Ollama chat model in your LangChain application."
|
||||
"See a typical basic example of using [Ollama chat model](https://python.langchain.com/v0.2/docs/integrations/chat/ollama/) in your LangChain application."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -87,7 +96,9 @@
|
||||
"source": [
|
||||
"from langchain_community.llms import Ollama\n",
|
||||
"\n",
|
||||
"llm = Ollama(model=\"llama3\")\n",
|
||||
"llm = Ollama(\n",
|
||||
" model=\"llama3\"\n",
|
||||
") # assuming you have Ollama installed and have llama3 model pulled with `ollama pull llama3 `\n",
|
||||
"\n",
|
||||
"llm.invoke(\"Tell me a joke\")"
|
||||
]
|
||||
@@ -280,6 +291,24 @@
|
||||
"llm_with_image_context = bakllava.bind(images=[image_b64])\n",
|
||||
"llm_with_image_context.invoke(\"What is the dollar based gross retention rate:\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Concurrency Features\n",
|
||||
"\n",
|
||||
"Ollama supports concurrency inference for a single model, and or loading multiple models simulatenously (at least [version 0.1.33](https://github.com/ollama/ollama/releases)).\n",
|
||||
"\n",
|
||||
"Start the Ollama server with:\n",
|
||||
"\n",
|
||||
"* `OLLAMA_NUM_PARALLEL`: Handle multiple requests simultaneously for a single model\n",
|
||||
"* `OLLAMA_MAX_LOADED_MODELS`: Load multiple models simultaneously\n",
|
||||
"\n",
|
||||
"Example: `OLLAMA_NUM_PARALLEL=4 OLLAMA_MAX_LOADED_MODELS=4 ollama serve`\n",
|
||||
"\n",
|
||||
"Learn more about configuring Ollama server in [the official guide](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-do-i-configure-ollama-server)."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
@@ -31,7 +31,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade-strategy eager \"optimum[openvino,nncf]\" --quiet"
|
||||
"%pip install --upgrade-strategy eager \"optimum[openvino,nncf]\" langchain-huggingface --quiet"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -130,6 +130,28 @@
|
||||
"print(chain.invoke({\"question\": question}))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "446a01e0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To get response without prompt, you can bind `skip_prompt=True` with LLM."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e3baeab2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = prompt | ov_llm.bind(skip_prompt=True)\n",
|
||||
"\n",
|
||||
"question = \"What is electroencephalography?\"\n",
|
||||
"\n",
|
||||
"print(chain.invoke({\"question\": question}))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "12524837-e9ab-455a-86be-66b95f4f893a",
|
||||
@@ -243,7 +265,8 @@
|
||||
" skip_prompt=True,\n",
|
||||
" skip_special_tokens=True,\n",
|
||||
")\n",
|
||||
"ov_llm.pipeline._forward_params = {\"streamer\": streamer, \"max_new_tokens\": 100}\n",
|
||||
"pipeline_kwargs = {\"pipeline_kwargs\": {\"streamer\": streamer, \"max_new_tokens\": 100}}\n",
|
||||
"chain = prompt | ov_llm.bind(**pipeline_kwargs)\n",
|
||||
"\n",
|
||||
"t1 = Thread(target=chain.invoke, args=({\"question\": question},))\n",
|
||||
"t1.start()\n",
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
---
|
||||
keywords: [azure]
|
||||
---
|
||||
|
||||
# Microsoft
|
||||
|
||||
All functionality related to `Microsoft Azure` and other `Microsoft` products.
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
---
|
||||
keywords: [openai]
|
||||
---
|
||||
|
||||
# OpenAI
|
||||
|
||||
All functionality related to OpenAI
|
||||
|
||||
@@ -64,7 +64,7 @@ set_llm_cache(AstraDBCache(
|
||||
))
|
||||
```
|
||||
|
||||
Learn more in the [example notebook](/docs/integrations/llms/llm_caching#astra-db-caches) (scroll to the Astra DB section).
|
||||
Learn more in the [example notebook](/docs/integrations/llm_caching#astra-db-caches) (scroll to the Astra DB section).
|
||||
|
||||
|
||||
## Semantic LLM Cache
|
||||
@@ -80,7 +80,7 @@ set_llm_cache(AstraDBSemanticCache(
|
||||
))
|
||||
```
|
||||
|
||||
Learn more in the [example notebook](/docs/integrations/llms/llm_caching#astra-db-caches) (scroll to the appropriate section).
|
||||
Learn more in the [example notebook](/docs/integrations/llm_caching#astra-db-caches) (scroll to the appropriate section).
|
||||
|
||||
Learn more in the [example notebook](/docs/integrations/memory/astradb_chat_message_history).
|
||||
|
||||
|
||||
@@ -40,7 +40,7 @@ from langchain_community.cache import CassandraCache
|
||||
set_llm_cache(CassandraCache())
|
||||
```
|
||||
|
||||
Learn more in the [example notebook](/docs/integrations/llms/llm_caching#cassandra-caches) (scroll to the Cassandra section).
|
||||
Learn more in the [example notebook](/docs/integrations/llm_caching#cassandra-caches) (scroll to the Cassandra section).
|
||||
|
||||
|
||||
## Semantic LLM Cache
|
||||
@@ -54,7 +54,7 @@ set_llm_cache(CassandraSemanticCache(
|
||||
))
|
||||
```
|
||||
|
||||
Learn more in the [example notebook](/docs/integrations/llms/llm_caching#cassandra-caches) (scroll to the appropriate section).
|
||||
Learn more in the [example notebook](/docs/integrations/llm_caching#cassandra-caches) (scroll to the appropriate section).
|
||||
|
||||
## Document loader
|
||||
|
||||
|
||||
@@ -48,6 +48,6 @@ eng = sqlalchemy.create_engine(conn_str)
|
||||
set_llm_cache(SQLAlchemyCache(engine=eng))
|
||||
```
|
||||
|
||||
From here, see the [LLM Caching](/docs/integrations/llms/llm_caching) documentation on how to use.
|
||||
From here, see the [LLM Caching](/docs/integrations/llm_caching) documentation on how to use.
|
||||
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Ollama
|
||||
|
||||
>[Ollama](https://ollama.ai/) is a python library. It allows you to run open-source large language models,
|
||||
>[Ollama](https://ollama.com/) allows you to run open-source large language models,
|
||||
> such as LLaMA2, locally.
|
||||
>
|
||||
>`Ollama` bundles model weights, configuration, and data into a single package, defined by a Modelfile.
|
||||
@@ -12,11 +12,8 @@ on how to use `Ollama` with LangChain.
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
Follow [these instructions](https://github.com/jmorganca/ollama?tab=readme-ov-file#ollama)
|
||||
Follow [these instructions](https://github.com/ollama/ollama?tab=readme-ov-file#ollama)
|
||||
to set up and run a local Ollama instance.
|
||||
To use, you should set up the environment variables `ANYSCALE_API_BASE` and
|
||||
`ANYSCALE_API_KEY`.
|
||||
|
||||
|
||||
## LLM
|
||||
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
---
|
||||
keywords: [pinecone]
|
||||
---
|
||||
|
||||
# Pinecone
|
||||
|
||||
>[Pinecone](https://docs.pinecone.io/docs/overview) is a vector database with broad functionality.
|
||||
|
||||
@@ -73,7 +73,76 @@ chat.invoke(
|
||||
|
||||
> If you are going to place system prompt here, then it will override your system prompt that was fixed while deploying the application from the platform.
|
||||
|
||||
> Please note that the current version of ChatPremAI does not support parameters: [n](https://platform.openai.com/docs/api-reference/chat/create#chat-create-n) and [stop](https://platform.openai.com/docs/api-reference/chat/create#chat-create-stop).
|
||||
> You can find all the optional parameters [here](https://docs.premai.io/get-started/sdk#optional-parameters). Any parameters other than [these supported parameters](https://docs.premai.io/get-started/sdk#optional-parameters) will be automatically removed before calling the model.
|
||||
|
||||
|
||||
### Native RAG Support with Prem Repositories
|
||||
|
||||
Prem Repositories allow users to upload documents (.txt, .pdf, etc.) and connect those repositories to the LLMs. You can think of Prem repositories as native RAG, where each repository can be considered a vector database. You can connect multiple repositories. You can learn more about repositories [here](https://docs.premai.io/get-started/repositories).
|
||||
|
||||
Repositories are also supported in the LangChain PremAI integration. Here is how you can use them.
|
||||
|
||||
```python
|
||||
|
||||
query = "what is the diameter of individual Galaxy"
|
||||
repository_ids = [1991, ]
|
||||
repositories = dict(
|
||||
ids=repository_ids,
|
||||
similarity_threshold=0.3,
|
||||
limit=3
|
||||
)
|
||||
```
|
||||
|
||||
First, we define our repository with some repository IDs. Make sure that the IDs are valid repository IDs. You can learn more about how to get the repository ID [here](https://docs.premai.io/get-started/repositories).
|
||||
|
||||
> Please note: Similar to `model_name`, when you pass the `repositories` argument you potentially override the repositories connected in the launchpad.
|
||||
|
||||
Now, we connect the repository to our chat object to invoke RAG-based generations.
|
||||
|
||||
```python
|
||||
response = chat.invoke(query, max_tokens=100, repositories=repositories)
|
||||
|
||||
print(response.content)
|
||||
print(json.dumps(response.response_metadata, indent=4))
|
||||
```
|
||||
|
||||
This is what the output looks like.
|
||||
|
||||
```bash
|
||||
The diameters of individual galaxies range from 80,000-150,000 light-years.
|
||||
{
|
||||
"document_chunks": [
|
||||
{
|
||||
"repository_id": 1991,
|
||||
"document_id": 1307,
|
||||
"chunk_id": 173926,
|
||||
"document_name": "Kegy 202 Chapter 2",
|
||||
"similarity_score": 0.586126983165741,
|
||||
"content": "n thousands\n of light-years. The diameters of individual\n galaxies range from 80,000-150,000 light\n "
|
||||
},
|
||||
{
|
||||
"repository_id": 1991,
|
||||
"document_id": 1307,
|
||||
"chunk_id": 173925,
|
||||
"document_name": "Kegy 202 Chapter 2",
|
||||
"similarity_score": 0.4815782308578491,
|
||||
"content": " for development of galaxies. A galaxy contains\n a large number of stars. Galaxies spread over\n vast distances that are measured in thousands\n "
|
||||
},
|
||||
{
|
||||
"repository_id": 1991,
|
||||
"document_id": 1307,
|
||||
"chunk_id": 173916,
|
||||
"document_name": "Kegy 202 Chapter 2",
|
||||
"similarity_score": 0.38112708926200867,
|
||||
"content": " was separated from the from each other as the balloon expands.\n solar surface. As the passing star moved away, Similarly, the distance between the galaxies is\n the material separated from the solar surface\n continued to revolve around the sun and it\n slowly condensed into planets. Sir James Jeans\n and later Sir Harold Jeffrey supported thisnot to be republishedalso found to be increasing and thereby, the\n universe is"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
So, this also means that you do not need to build your own RAG pipeline when using the Prem Platform. Prem uses its own RAG technology to deliver best-in-class performance for Retrieval Augmented Generation.
|
||||
|
||||
> Ideally, you do not need to connect repository IDs here to get retrieval-augmented generations. You can still get the same result if you have connected the repositories on the Prem platform.
|
||||
|
||||
### Streaming
|
||||
|
||||
@@ -102,6 +171,8 @@ for chunk in chat.stream(
|
||||
|
||||
This will stream tokens one after the other.
|
||||
|
||||
> Please note: As of now, RAG with streaming is not supported. However, we still support it through our API. You can learn more about that [here](https://docs.premai.io/get-started/chat-completion-sse).
|
||||
|
||||
## PremEmbeddings
|
||||
|
||||
In this section we are going to discuss how we can get access to different embedding models using `PremEmbeddings` with LangChain. Let's start by importing our modules and setting our API key.
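As a rough sketch of what that setup can look like (assuming the embeddings class is exposed as `PremEmbeddings`, matching this section's title, and that it takes a `project_id` like `ChatPremAI`; the project ID and model name below are purely illustrative, so check the PremAI docs for the exact names):

```python
import getpass
import os

# Assumed import path and class name; verify against the langchain_community API reference.
from langchain_community.embeddings import PremEmbeddings

if os.environ.get("PREMAI_API_KEY") is None:
    os.environ["PREMAI_API_KEY"] = getpass.getpass("PremAI API Key:")

# project_id and model are placeholders; use your own project and a model available to it.
embedder = PremEmbeddings(project_id=8, model="text-embedding-3-large")

query_embedding = embedder.embed_query("Hello, this is a test query")
print(len(query_embedding))
```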
|
||||
|
||||
@@ -61,6 +61,22 @@ store = UpstashVectorStore(
|
||||
See [Upstash Vector documentation](https://upstash.com/docs/vector/features/embeddingmodels)
|
||||
for more detail on embedding models.
|
||||
|
||||
## Namespaces
|
||||
You can use namespaces to partition your data in the index. Namespaces are useful when you want to query over a huge amount of data and want to partition it to make queries faster. When you use namespaces, there won't be post-filtering on the results, which makes the query results more precise.
|
||||
|
||||
```python
|
||||
from langchain_community.vectorstores.upstash import UpstashVectorStore
|
||||
import os
|
||||
|
||||
os.environ["UPSTASH_VECTOR_REST_URL"] = "<UPSTASH_VECTOR_REST_URL>"
|
||||
os.environ["UPSTASH_VECTOR_REST_TOKEN"] = "<UPSTASH_VECTOR_REST_TOKEN>"
|
||||
|
||||
store = UpstashVectorStore(
|
||||
embedding=embeddings,
|
||||
namespace="my_namespace"
|
||||
)
|
||||
```
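Once the store is created with a namespace, subsequent operations are scoped to that namespace. A minimal sketch (assuming `embeddings` is defined as in the earlier examples):

```python
# Only vectors stored under "my_namespace" are searched
results = store.similarity_search("What is Upstash Vector?", k=5)
for doc in results:
    print(doc.page_content)
```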
|
||||
|
||||
### Inserting Vectors
|
||||
|
||||
```python
|
||||
|
||||
@@ -1,28 +1,38 @@
|
||||
# Vectara
|
||||
|
||||
>[Vectara](https://vectara.com/) is the trusted GenAI platform for developers. It provides a simple API to build GenAI applications
|
||||
> for semantic search or RAG (Retreieval augmented generation).
|
||||
>[Vectara](https://vectara.com/) provides a Trusted Generative AI platform, allowing organizations to rapidly create a ChatGPT-like experience (an AI assistant)
|
||||
> which is grounded in the data, documents, and knowledge that they have (technically, it is Retrieval-Augmented-Generation-as-a-service).
|
||||
|
||||
**Vectara Overview:**
|
||||
- `Vectara` is developer-first API platform for building trusted GenAI applications.
|
||||
- To use Vectara - first [sign up](https://vectara.com/integrations/langchain) and create an account. Then create a corpus and an API key for indexing and searching.
|
||||
- You can use Vectara's [indexing API](https://docs.vectara.com/docs/indexing-apis/indexing) to add documents into Vectara's index
|
||||
- You can use Vectara's [Search API](https://docs.vectara.com/docs/search-apis/search) to query Vectara's index (which also supports Hybrid search implicitly).
|
||||
`Vectara` is RAG-as-a-service, providing all the components of RAG behind an easy-to-use API, including:
|
||||
1. A way to extract text from files (PDF, PPT, DOCX, etc)
|
||||
2. ML-based chunking that provides state of the art performance.
|
||||
3. The [Boomerang](https://vectara.com/how-boomerang-takes-retrieval-augmented-generation-to-the-next-level-via-grounded-generation/) embeddings model.
|
||||
4. Its own internal vector database where text chunks and embedding vectors are stored.
|
||||
5. A query service that automatically encodes the query into an embedding and retrieves the most relevant text segments
|
||||
(including support for [Hybrid Search](https://docs.vectara.com/docs/api-reference/search-apis/lexical-matching) and
|
||||
[MMR](https://vectara.com/get-diverse-results-and-comprehensive-summaries-with-vectaras-mmr-reranker/))
|
||||
6. An LLM for creating a [generative summary](https://docs.vectara.com/docs/learn/grounded-generation/grounded-generation-overview), based on the retrieved documents (context), including citations.
|
||||
|
||||
For more information:
|
||||
- [Documentation](https://docs.vectara.com/docs/)
|
||||
- [API Playground](https://docs.vectara.com/docs/rest-api/)
|
||||
- [Quickstart](https://docs.vectara.com/docs/quickstart)
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
To use `Vectara` with LangChain, no special installation steps are required.
|
||||
To get started, [sign up](https://vectara.com/integrations/langchain) and follow our [quickstart](https://docs.vectara.com/docs/quickstart) guide to create a corpus and an API key.
|
||||
Once you have these, you can provide them as arguments to the Vectara vectorstore, or you can set them as environment variables.
|
||||
To get started, [sign up](https://vectara.com/integrations/langchain) for a free Vectara account (if you don't already have one),
|
||||
and follow the [quickstart](https://docs.vectara.com/docs/quickstart) guide to create a corpus and an API key.
|
||||
Once you have these, you can provide them as arguments to the Vectara `vectorstore`, or you can set them as environment variables.
|
||||
|
||||
- export `VECTARA_CUSTOMER_ID`="your_customer_id"
|
||||
- export `VECTARA_CORPUS_ID`="your_corpus_id"
|
||||
- export `VECTARA_API_KEY`="your-vectara-api-key"
|
||||
|
||||
|
||||
## Vectara as a Vector Store
|
||||
|
||||
There exists a wrapper around the Vectara platform, allowing you to use it as a vectorstore, whether for semantic search or example selection.
|
||||
There exists a wrapper around the Vectara platform, allowing you to use it as a `vectorstore` in LangChain:
|
||||
|
||||
To import this vectorstore:
|
||||
```python
|
||||
@@ -37,7 +47,10 @@ vectara = Vectara(
|
||||
vectara_api_key=api_key
|
||||
)
|
||||
```
|
||||
The customer_id, corpus_id and api_key are optional, and if they are not supplied will be read from the environment variables `VECTARA_CUSTOMER_ID`, `VECTARA_CORPUS_ID` and `VECTARA_API_KEY`, respectively.
|
||||
The `customer_id`, `corpus_id` and `api_key` are optional, and if they are not supplied will be read from
|
||||
the environment variables `VECTARA_CUSTOMER_ID`, `VECTARA_CORPUS_ID` and `VECTARA_API_KEY`, respectively.
|
||||
|
||||
### Adding Texts or Files
|
||||
|
||||
After you have the vectorstore, you can `add_texts` or `add_documents` as per the standard `VectorStore` interface, for example:
|
||||
|
||||
@@ -45,8 +58,8 @@ After you have the vectorstore, you can `add_texts` or `add_documents` as per th
|
||||
vectara.add_texts(["to be or not to be", "that is the question"])
|
||||
```
|
||||
|
||||
|
||||
Since Vectara supports file-upload, we also added the ability to upload files (PDF, TXT, HTML, PPT, DOC, etc) directly as file. When using this method, the file is uploaded directly to the Vectara backend, processed and chunked optimally there, so you don't have to use the LangChain document loader or chunking mechanism.
|
||||
Since Vectara supports file-upload in the platform, we also added the ability to upload files (PDF, TXT, HTML, PPT, DOC, etc) directly.
|
||||
When using this method, each file is uploaded directly to the Vectara backend, processed and chunked optimally there, so you don't have to use the LangChain document loader or chunking mechanism.
|
||||
|
||||
As an example:
|
||||
|
||||
@@ -54,9 +67,13 @@ As an example:
|
||||
vectara.add_files(["path/to/file1.pdf", "path/to/file2.pdf",...])
|
||||
```
|
||||
|
||||
To query the vectorstore, you can use the `similarity_search` method (or `similarity_search_with_score`), which takes a query string and returns a list of results:
|
||||
Of course, you do not have to add any data; you can instead just connect to an existing Vectara corpus where data may already be indexed.
|
||||
|
||||
### Querying the VectorStore
|
||||
|
||||
To query the Vectara vectorstore, you can use the `similarity_search` method (or `similarity_search_with_score`), which takes a query string and returns a list of results:
|
||||
```python
|
||||
results = vectara.similarity_score("what is LangChain?")
|
||||
results = vectara.similarity_search_with_score("what is LangChain?")
|
||||
```
|
||||
The results are returned as a list of relevant documents, each with a relevance score.
|
||||
|
||||
@@ -65,28 +82,101 @@ In this case, we used the default retrieval parameters, but you can also specify
|
||||
- `lambda_val`: the [lexical matching](https://docs.vectara.com/docs/api-reference/search-apis/lexical-matching) factor for hybrid search (defaults to 0.025)
|
||||
- `filter`: a [filter](https://docs.vectara.com/docs/common-use-cases/filtering-by-metadata/filter-overview) to apply to the results (default None)
|
||||
- `n_sentence_context`: number of sentences to include before/after the actual matching segment when returning results. This defaults to 2.
|
||||
- `mmr_config`: can be used to specify MMR mode in the query.
|
||||
- `is_enabled`: True or False
|
||||
- `mmr_k`: number of results to use for MMR reranking
|
||||
- `diversity_bias`: 0 = no diversity, 1 = full diversity. This is the lambda parameter in the MMR formula and is in the range 0...1
|
||||
- `rerank_config`: can be used to specify the reranker for the results
|
||||
- `reranker`: mmr, rerank_multilingual_v1 or none. Note that "rerank_multilingual_v1" is a Scale only feature
|
||||
- `rerank_k`: number of results to use for reranking
|
||||
- `mmr_diversity_bias`: 0 = no diversity, 1 = full diversity. This is the lambda parameter in the MMR formula and is in the range 0...1
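For example, here is a sketch of a query that sets these parameters explicitly (the values are illustrative, and `vectara` is the vectorstore created above):

```python
from langchain_community.vectorstores.vectara import RerankConfig

results = vectara.similarity_search_with_score(
    "what is LangChain?",
    k=5,                   # number of results to return
    lambda_val=0.025,      # lexical matching factor for hybrid search
    filter=None,           # optional metadata filter
    n_sentence_context=2,  # sentences of context around each matching segment
    rerank_config=RerankConfig(reranker="mmr", rerank_k=50, mmr_diversity_bias=0.2),
)
```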
|
||||
|
||||
To get results without the relevance score, you can simply use the `similarity_search` method:
|
||||
```python
|
||||
results = vectara.similarity_search("what is LangChain?")
|
||||
```
|
||||
|
||||
## Vectara for Retrieval Augmented Generation (RAG)
|
||||
|
||||
Vectara provides a full RAG pipeline, including generative summarization.
|
||||
To use this pipeline, you can specify the `summary_config` argument in `similarity_search` or `similarity_search_with_score` as follows:
|
||||
Vectara provides a full RAG pipeline, including generative summarization. To use it as a complete RAG solution, you can use the `as_rag` method.
|
||||
There are a few additional parameters that can be specified in the `VectaraQueryConfig` object to control retrieval and summarization:
|
||||
* k: number of results to return
|
||||
* lambda_val: the lexical matching factor for hybrid search
|
||||
* summary_config (optional): can be used to request an LLM summary in RAG
|
||||
- is_enabled: True or False
|
||||
- max_results: number of results to use for summary generation
|
||||
- response_lang: language of the response summary, in ISO 639-2 format (e.g. 'en', 'fr', 'de', etc)
|
||||
* rerank_config (optional): can be used to specify Vectara Reranker of the results
|
||||
- reranker: mmr, rerank_multilingual_v1 or none
|
||||
- rerank_k: number of results to use for reranking
|
||||
- mmr_diversity_bias: 0 = no diversity, 1 = full diversity.
|
||||
This is the lambda parameter in the MMR formula and is in the range 0...1
|
||||
|
||||
- `summary_config`: can be used to request an LLM summary in RAG
|
||||
- `is_enabled`: True or False
|
||||
- `max_results`: number of results to use for summary generation
|
||||
- `response_lang`: language of the response summary, in ISO 639-2 format (e.g. 'en', 'fr', 'de', etc)
|
||||
For example:
|
||||
|
||||
```python
|
||||
summary_config = SummaryConfig(is_enabled=True, max_results=7, response_lang='eng')
|
||||
rerank_config = RerankConfig(reranker="mmr", rerank_k=50, mmr_diversity_bias=0.2)
|
||||
config = VectaraQueryConfig(k=10, lambda_val=0.005, rerank_config=rerank_config, summary_config=summary_config)
|
||||
```
|
||||
Then you can use the `as_rag` method to create a RAG pipeline:
|
||||
|
||||
```python
|
||||
query_str = "what did Biden say?"
|
||||
|
||||
rag = vectara.as_rag(config)
|
||||
rag.invoke(query_str)['answer']
|
||||
```
|
||||
|
||||
The `as_rag` method returns a `VectaraRAG` object, which behaves just like any LangChain Runnable, including the `invoke` or `stream` methods.
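For example, a minimal streaming sketch, reusing `rag` and `query_str` from the snippet above (the exact chunk structure is defined by the Vectara integration):

```python
for chunk in rag.stream(query_str):
    print(chunk)
```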
|
||||
|
||||
## Vectara Chat
|
||||
|
||||
The RAG functionality can be used to create a chatbot. For example, you can create a simple chatbot that responds to user input:
|
||||
|
||||
```python
|
||||
summary_config = SummaryConfig(is_enabled=True, max_results=7, response_lang='eng')
|
||||
rerank_config = RerankConfig(reranker="mmr", rerank_k=50, mmr_diversity_bias=0.2)
|
||||
config = VectaraQueryConfig(k=10, lambda_val=0.005, rerank_config=rerank_config, summary_config=summary_config)
|
||||
|
||||
query_str = "what did Biden say?"
|
||||
bot = vectara.as_chat(config)
|
||||
bot.invoke(query_str)['answer']
|
||||
```
|
||||
|
||||
The main difference is the following: with `as_chat` Vectara internally tracks the chat history and conditions each response on the full chat history.
|
||||
There is no need to keep that history locally to LangChain, as Vectara will manage it internally.
|
||||
|
||||
## Vectara as a LangChain retriever only
|
||||
|
||||
If you want to use Vectara as a retriever only, you can use the `as_retriever` method, which returns a `VectaraRetriever` object.
|
||||
```python
|
||||
retriever = vectara.as_retriever(config=config)
|
||||
retriever.invoke(query_str)
|
||||
```
|
||||
|
||||
Like with `as_rag`, you provide a `VectaraQueryConfig` object to control the retrieval parameters.
|
||||
In most cases you would not enable the `summary_config`, but it is left as an option for backwards compatibility.
|
||||
If no summary is requested, the response will be a list of relevant documents, each with a relevance score.
|
||||
If a summary is requested, the response will be a list of relevant documents as before, plus an additional document that includes the generative summary.
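A sketch of reading the results in the summary-enabled case (assuming the summary is appended as the final document, as described above):

```python
docs = retriever.invoke(query_str)
*context_docs, summary_doc = docs  # the extra document carries the generative summary
print(summary_doc.page_content)
```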
|
||||
|
||||
## Hallucination Detection score
|
||||
|
||||
Vectara created [HHEM](https://huggingface.co/vectara/hallucination_evaluation_model) - an open source model that can be used to evaluate RAG responses for factual consistency.
|
||||
As part of the Vectara RAG, the "Factual Consistency Score" (or FCS), an improved version of the open-source HHEM, is made available via the API.
|
||||
This is automatically included in the output of the RAG pipeline:
|
||||
|
||||
```python
|
||||
summary_config = SummaryConfig(is_enabled=True, max_results=7, response_lang='eng')
|
||||
rerank_config = RerankConfig(reranker="mmr", rerank_k=50, mmr_diversity_bias=0.2)
|
||||
config = VectaraQueryConfig(k=10, lambda_val=0.005, rerank_config=rerank_config, summary_config=summary_config)
|
||||
|
||||
rag = vectara.as_rag(config)
|
||||
resp = rag.invoke(query_str)
|
||||
print(resp['answer'])
|
||||
print(f"Vectara FCS = {resp['fcs']}")
|
||||
```
|
||||
|
||||
## Example Notebooks
|
||||
|
||||
For a more detailed examples of using Vectara, see the following examples:
|
||||
* [this notebook](/docs/integrations/vectorstores/vectara) shows how to use Vectara as a vectorstore for semantic search
|
||||
* [this notebook](/docs/integrations/providers/vectara/vectara_chat) shows how to build a chatbot with Langchain and Vectara
|
||||
* [this notebook](/docs/integrations/providers/vectara/vectara_summary) shows how to use the full Vectara RAG pipeline, including generative summarization
|
||||
For more detailed examples of using Vectara with LangChain, see the following example notebooks:
|
||||
* [this notebook](/docs/integrations/vectorstores/vectara) shows how to use Vectara: with full RAG or just as a retriever.
|
||||
* [this notebook](/docs/integrations/retrievers/self_query/vectara_self_query) shows the self-query capability with Vectara.
|
||||
|
||||
|
||||
* [this notebook](/docs/integrations/providers/vectara/vectara_chat) shows how to build a chatbot with LangChain and Vectara
|
||||
|
||||
|
||||
@@ -5,7 +5,21 @@
|
||||
"id": "134a0785",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Chat Over Documents with Vectara"
|
||||
"# Vectara Chat\n",
|
||||
"\n",
|
||||
"[Vectara](https://vectara.com/) provides a Trusted Generative AI platform, allowing organizations to rapidly create a ChatGPT-like experience (an AI assistant) which is grounded in the data, documents, and knowledge that they have (technically, it is Retrieval-Augmented-Generation-as-a-service). \n",
|
||||
"\n",
|
||||
"Vectara serverless RAG-as-a-service provides all the components of RAG behind an easy-to-use API, including:\n",
|
||||
"1. A way to extract text from files (PDF, PPT, DOCX, etc)\n",
|
||||
"2. ML-based chunking that provides state of the art performance.\n",
|
||||
"3. The [Boomerang](https://vectara.com/how-boomerang-takes-retrieval-augmented-generation-to-the-next-level-via-grounded-generation/) embeddings model.\n",
|
||||
"4. Its own internal vector database where text chunks and embedding vectors are stored.\n",
|
||||
"5. A query service that automatically encodes the query into embedding, and retrieves the most relevant text segments (including support for [Hybrid Search](https://docs.vectara.com/docs/api-reference/search-apis/lexical-matching) and [MMR](https://vectara.com/get-diverse-results-and-comprehensive-summaries-with-vectaras-mmr-reranker/))\n",
|
||||
"7. An LLM to for creating a [generative summary](https://docs.vectara.com/docs/learn/grounded-generation/grounded-generation-overview), based on the retrieved documents (context), including citations.\n",
|
||||
"\n",
|
||||
"See the [Vectara API documentation](https://docs.vectara.com/docs/) for more information on how to use the API.\n",
|
||||
"\n",
|
||||
"This notebook shows how to use Vectara's [Chat](https://docs.vectara.com/docs/api-reference/chat-apis/chat-apis-overview) functionality."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -13,19 +27,19 @@
|
||||
"id": "56372c5b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Setup\n",
|
||||
"# Getting Started\n",
|
||||
"\n",
|
||||
"You will need a Vectara account to use Vectara with LangChain. To get started, use the following steps:\n",
|
||||
"1. [Sign up](https://www.vectara.com/integrations/langchain) for a Vectara account if you don't already have one. Once you have completed your sign up you will have a Vectara customer ID. You can find your customer ID by clicking on your name, on the top-right of the Vectara console window.\n",
|
||||
"To get started, use the following steps:\n",
|
||||
"1. If you don't already have one, [Sign up](https://www.vectara.com/integrations/langchain) for your free Vectara account. Once you have completed your sign up you will have a Vectara customer ID. You can find your customer ID by clicking on your name, on the top-right of the Vectara console window.\n",
|
||||
"2. Within your account you can create one or more corpora. Each corpus represents an area that stores text data upon ingest from input documents. To create a corpus, use the **\"Create Corpus\"** button. You then provide a name to your corpus as well as a description. Optionally you can define filtering attributes and apply some advanced options. If you click on your created corpus, you can see its name and corpus ID right on the top.\n",
|
||||
"3. Next you'll need to create API keys to access the corpus. Click on the **\"Authorization\"** tab in the corpus view and then the **\"Create API Key\"** button. Give your key a name, and choose whether you want query only or query+index for your key. Click \"Create\" and you now have an active API key. Keep this key confidential. \n",
|
||||
"3. Next you'll need to create API keys to access the corpus. Click on the **\"Access Control\"** tab in the corpus view and then the **\"Create API Key\"** button. Give your key a name, and choose whether you want query-only or query+index for your key. Click \"Create\" and you now have an active API key. Keep this key confidential. \n",
|
||||
"\n",
|
||||
"To use LangChain with Vectara, you'll need to have these three values: customer ID, corpus ID and api_key.\n",
|
||||
"To use LangChain with Vectara, you'll need to have these three values: `customer ID`, `corpus ID` and `api_key`.\n",
|
||||
"You can provide those to LangChain in two ways:\n",
|
||||
"\n",
|
||||
"1. Include in your environment these three variables: `VECTARA_CUSTOMER_ID`, `VECTARA_CORPUS_ID` and `VECTARA_API_KEY`.\n",
|
||||
"\n",
|
||||
"> For example, you can set these variables using os.environ and getpass as follows:\n",
|
||||
" For example, you can set these variables using os.environ and getpass as follows:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"import os\n",
|
||||
@@ -36,20 +50,21 @@
|
||||
"os.environ[\"VECTARA_API_KEY\"] = getpass.getpass(\"Vectara API Key:\")\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"2. Add them to the Vectara vectorstore constructor:\n",
|
||||
"2. Add them to the `Vectara` vectorstore constructor:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"vectorstore = Vectara(\n",
|
||||
"vectara = Vectara(\n",
|
||||
" vectara_customer_id=vectara_customer_id,\n",
|
||||
" vectara_corpus_id=vectara_corpus_id,\n",
|
||||
" vectara_api_key=vectara_api_key\n",
|
||||
" )\n",
|
||||
"```"
|
||||
"```\n",
|
||||
"In this notebook we assume they are provided in the environment."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 1,
|
||||
"id": "70c4e529",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -58,9 +73,16 @@
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain.chains import ConversationalRetrievalChain\n",
|
||||
"os.environ[\"VECTARA_API_KEY\"] = \"<YOUR_VECTARA_API_KEY>\"\n",
|
||||
"os.environ[\"VECTARA_CORPUS_ID\"] = \"<YOUR_VECTARA_CORPUS_ID>\"\n",
|
||||
"os.environ[\"VECTARA_CUSTOMER_ID\"] = \"<YOUR_VECTARA_CUSTOMER_ID>\"\n",
|
||||
"\n",
|
||||
"from langchain_community.vectorstores import Vectara\n",
|
||||
"from langchain_openai import OpenAI"
|
||||
"from langchain_community.vectorstores.vectara import (\n",
|
||||
" RerankConfig,\n",
|
||||
" SummaryConfig,\n",
|
||||
" VectaraQueryConfig,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -68,62 +90,30 @@
|
||||
"id": "cdff94be",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Load in documents. You can replace this with a loader for whatever type of data you want"
|
||||
"## Vectara Chat Explained\n",
|
||||
"\n",
|
||||
"In most uses of LangChain to create chatbots, one must integrate a special `memory` component that maintains the history of chat sessions and then uses that history to ensure the chatbot is aware of conversation history.\n",
|
||||
"\n",
|
||||
"With Vectara Chat - all of that is performed in the backend by Vectara automatically. You can look at the [Chat](https://docs.vectara.com/docs/api-reference/chat-apis/chat-apis-overview) documentation for the details, to learn more about the internals of how this is implemented, but with LangChain all you have to do is turn that feature on in the Vectara vectorstore.\n",
|
||||
"\n",
|
||||
"Let's see an example. First we load the SOTU document (remember, text extraction and chunking all occurs automatically on the Vectara platform):"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 2,
|
||||
"id": "01c46e92",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import TextLoader\n",
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
"\n",
|
||||
"loader = TextLoader(\"state_of_the_union.txt\")\n",
|
||||
"documents = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "239475d2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Since we're using Vectara, there's no need to chunk the documents, as that is done automatically in the Vectara platform backend. We just use `from_document()` to upload the text loaded from the file, and directly ingest it into Vectara:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "a8930cf7",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"vectara = Vectara.from_documents(documents, embedding=None)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "898b574b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can now create a memory object, which is neccessary to track the inputs/outputs and hold a conversation."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "af803fee",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.memory import ConversationBufferMemory\n",
|
||||
"documents = loader.load()\n",
|
||||
"\n",
|
||||
"memory = ConversationBufferMemory(memory_key=\"chat_history\", return_messages=True)"
|
||||
"vectara = Vectara.from_documents(documents, embedding=None)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -131,139 +121,25 @@
|
||||
"id": "3c96b118",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We now initialize the `ConversationalRetrievalChain`:"
|
||||
"And now we create a Chat Runnable using the `as_chat` method:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "7b4110f3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[Document(page_content='Justice Breyer, thank you for your service. One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. A former top litigator in private practice.', metadata={'source': 'langchain', 'lang': 'eng', 'offset': '29486', 'len': '97'}), Document(page_content='Groups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland. In this struggle as President Zelenskyy said in his speech to the European Parliament “Light will win over darkness.” The Ukrainian Ambassador to the United States is here tonight. Let each of us here tonight in this Chamber send an unmistakable signal to Ukraine and to the world.', metadata={'source': 'langchain', 'lang': 'eng', 'offset': '1083', 'len': '117'}), Document(page_content='All told, we created 369,000 new manufacturing jobs in America just last year. Powered by people I’ve met like JoJo Burgess, from generations of union steelworkers from Pittsburgh, who’s here with us tonight. As Ohio Senator Sherrod Brown says, “It’s time to bury the label “Rust Belt.” It’s time. \\n\\nBut with all the bright spots in our economy, record job growth and higher wages, too many families are struggling to keep up with the bills. Inflation is robbing them of the gains they might otherwise feel.', metadata={'source': 'langchain', 'lang': 'eng', 'offset': '14257', 'len': '77'}), Document(page_content='This is personal to me and Jill, to Kamala, and to so many of you. Cancer is the #2 cause of death in America–second only to heart disease. Last month, I announced our plan to supercharge \\nthe Cancer Moonshot that President Obama asked me to lead six years ago. Our goal is to cut the cancer death rate by at least 50% over the next 25 years, turn more cancers from death sentences into treatable diseases. More support for patients and families.', metadata={'source': 'langchain', 'lang': 'eng', 'offset': '36196', 'len': '122'}), Document(page_content='Six days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. He thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. He met the Ukrainian people.', metadata={'source': 'langchain', 'lang': 'eng', 'offset': '664', 'len': '68'}), Document(page_content='I understand. \\n\\nI remember when my Dad had to leave our home in Scranton, Pennsylvania to find work. I grew up in a family where if the price of food went up, you felt it. That’s why one of the first things I did as President was fight to pass the American Rescue Plan. Because people were hurting. We needed to act, and we did.', metadata={'source': 'langchain', 'lang': 'eng', 'offset': '8042', 'len': '97'}), Document(page_content='He rejected repeated efforts at diplomacy. He thought the West and NATO wouldn’t respond. And he thought he could divide us at home. We were ready. Here is what we did. We prepared extensively and carefully.', metadata={'source': 'langchain', 'lang': 'eng', 'offset': '2100', 'len': '42'}), Document(page_content='He thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. 
He met the Ukrainian people. From President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. Groups of citizens blocking tanks with their bodies.', metadata={'source': 'langchain', 'lang': 'eng', 'offset': '788', 'len': '28'}), Document(page_content='Putin’s latest attack on Ukraine was premeditated and unprovoked. He rejected repeated efforts at diplomacy. He thought the West and NATO wouldn’t respond. And he thought he could divide us at home. We were ready. Here is what we did.', metadata={'source': 'langchain', 'lang': 'eng', 'offset': '2053', 'len': '46'}), Document(page_content='A unity agenda for the nation. We can do this. \\n\\nMy fellow Americans—tonight , we have gathered in a sacred space—the citadel of our democracy. In this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. We have fought for freedom, expanded liberty, defeated totalitarianism and terror. And built the strongest, freest, and most prosperous nation the world has ever known.', metadata={'source': 'langchain', 'lang': 'eng', 'offset': '36968', 'len': '131'})]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"openai_api_key = os.environ[\"OPENAI_API_KEY\"]\n",
|
||||
"llm = OpenAI(openai_api_key=openai_api_key, temperature=0)\n",
|
||||
"retriever = vectara.as_retriever()\n",
|
||||
"d = retriever.invoke(\"What did the president say about Ketanji Brown Jackson\", k=2)\n",
|
||||
"print(d)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "44ed803e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"bot = ConversationalRetrievalChain.from_llm(\n",
|
||||
" llm, retriever, memory=memory, verbose=False\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5b6deb16",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And can have a multi-turn conversation with out new bot:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "e8ce4fe9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"result = bot.invoke({\"question\": query})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "4c79862b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\" The president said that Ketanji Brown Jackson is one of the nation's top legal minds and a former top litigator in private practice, and that she will continue Justice Breyer's legacy of excellence.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"result[\"answer\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "c697d9d1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"Did he mention who she suceeded\"\n",
|
||||
"result = bot.invoke({\"question\": query})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "ba0678f3",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' Ketanji Brown Jackson succeeded Justice Breyer on the United States Supreme Court.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"result[\"answer\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b3308b01-5300-4999-8cd3-22f16dae757e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Pass in chat history\n",
|
||||
"\n",
|
||||
"In the above example, we used a Memory object to track chat history. We can also just pass it in explicitly. In order to do this, we need to initialize a chain without any memory object."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"execution_count": 3,
|
||||
"id": "1b41a10b-bf68-4689-8f00-9aed7675e2ab",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"bot = ConversationalRetrievalChain.from_llm(\n",
|
||||
" OpenAI(temperature=0), vectara.as_retriever()\n",
|
||||
")"
|
||||
"summary_config = SummaryConfig(is_enabled=True, max_results=7, response_lang=\"eng\")\n",
|
||||
"rerank_config = RerankConfig(reranker=\"mmr\", rerank_k=50, mmr_diversity_bias=0.2)\n",
|
||||
"config = VectaraQueryConfig(\n",
|
||||
" k=10, lambda_val=0.005, rerank_config=rerank_config, summary_config=summary_config\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"bot = vectara.as_chat(config)"
|
||||
]
|
||||
},
|
||||
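For reference, here is the same configuration with each parameter annotated. This is a sketch; the interpretations follow the Vectara integration's documented behavior, and the values simply mirror the cell above:

```python
from langchain_community.vectorstores.vectara import (
    RerankConfig,
    SummaryConfig,
    VectaraQueryConfig,
)

summary_config = SummaryConfig(
    is_enabled=True,  # ask Vectara to generate a summary of the retrieved results
    max_results=7,  # use the top 7 retrieved segments when building the summary
    response_lang="eng",  # language of the generated summary
)
rerank_config = RerankConfig(
    reranker="mmr",  # use the MMR reranker to diversify results
    rerank_k=50,  # rerank the top 50 candidate segments
    mmr_diversity_bias=0.2,  # 0.0 = pure relevance, 1.0 = maximum diversity
)
config = VectaraQueryConfig(
    k=10,  # number of matching text segments to return
    lambda_val=0.005,  # hybrid-search weight given to exact (lexical) matching
    rerank_config=rerank_config,
    summary_config=summary_config,
)
```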
{
|
||||
@@ -276,39 +152,25 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"execution_count": 4,
|
||||
"id": "bc672290-8a8b-4828-a90c-f1bbdd6b3920",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat_history = []\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"result = bot.invoke({\"question\": query, \"chat_history\": chat_history})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "6b62d758-c069-4062-88f0-21e7ea4710bf",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\" The president said that Ketanji Brown Jackson is one of the nation's top legal minds and a former top litigator in private practice, and that she will continue Justice Breyer's legacy of excellence.\""
|
||||
"'The President expressed gratitude to Justice Breyer and highlighted the significance of nominating Ketanji Brown Jackson to the Supreme Court, praising her legal expertise and commitment to upholding excellence [1]. The President also reassured the public about the situation with gas prices and the conflict in Ukraine, emphasizing unity with allies and the belief that the world will emerge stronger from these challenges [2][4]. Additionally, the President shared personal experiences related to economic struggles and the importance of passing the American Rescue Plan to support those in need [3]. The focus was also on job creation and economic growth, acknowledging the impact of inflation on families [5]. While addressing cancer as a significant issue, the President discussed plans to enhance cancer research and support for patients and families [7].'"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"result[\"answer\"]"
|
||||
"bot.invoke(\"What did the president say about Ketanji Brown Jackson?\")[\"answer\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -321,256 +183,25 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"execution_count": 5,
|
||||
"id": "9c95460b-7116-4155-a9d2-c0fb027ee592",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat_history = [(query, result[\"answer\"])]\n",
|
||||
"query = \"Did he mention who she suceeded\"\n",
|
||||
"result = bot.invoke({\"question\": query, \"chat_history\": chat_history})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "698ac00c-cadc-407f-9423-226b2d9258d0",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' Ketanji Brown Jackson succeeded Justice Breyer on the United States Supreme Court.'"
|
||||
"\"In his remarks, the President specified that Ketanji Brown Jackson is succeeding Justice Breyer on the United States Supreme Court[1]. The President praised Jackson as a top legal mind who will continue Justice Breyer's legacy of excellence. The nomination of Jackson was highlighted as a significant constitutional responsibility of the President[1]. The President emphasized the importance of this nomination and the qualities that Jackson brings to the role. The focus was on the transition from Justice Breyer to Judge Ketanji Brown Jackson on the Supreme Court[1].\""
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"result[\"answer\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0eaadf0f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Return Source Documents\n",
|
||||
"You can also easily return source documents from the ConversationalRetrievalChain. This is useful for when you want to inspect what documents were returned."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "562769c6",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"bot = ConversationalRetrievalChain.from_llm(\n",
|
||||
" llm, vectara.as_retriever(), return_source_documents=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "ea478300",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat_history = []\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"result = bot.invoke({\"question\": query, \"chat_history\": chat_history})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "4cb75b4e",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Document(page_content='Justice Breyer, thank you for your service. One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. A former top litigator in private practice.', metadata={'source': 'langchain', 'lang': 'eng', 'offset': '29486', 'len': '97'})"
|
||||
]
|
||||
},
|
||||
"execution_count": 19,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"result[\"source_documents\"][0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "99b96dae",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## ConversationalRetrievalChain with `map_reduce`\n",
|
||||
"LangChain supports different types of ways to combine document chains with the ConversationalRetrievalChain chain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"id": "e53a9d66",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT\n",
|
||||
"from langchain.chains.question_answering import load_qa_chain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"id": "bf205e35",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)\n",
|
||||
"doc_chain = load_qa_chain(llm, chain_type=\"map_reduce\")\n",
|
||||
"\n",
|
||||
"chain = ConversationalRetrievalChain(\n",
|
||||
" retriever=vectara.as_retriever(),\n",
|
||||
" question_generator=question_generator,\n",
|
||||
" combine_docs_chain=doc_chain,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"id": "78155887",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat_history = []\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"result = chain({\"question\": query, \"chat_history\": chat_history})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"id": "e54b5fa2",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\" The president said that he nominated Circuit Court of Appeals Judge Ketanji Brown Jackson, who is one of the nation's top legal minds and a former top litigator in private practice.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 23,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"result[\"answer\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a2fe6b14",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## ConversationalRetrievalChain with Question Answering with sources\n",
|
||||
"\n",
|
||||
"You can also use this chain with the question answering with sources chain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"id": "d1058fd2",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains.qa_with_sources import load_qa_with_sources_chain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 25,
|
||||
"id": "a6594482",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)\n",
|
||||
"doc_chain = load_qa_with_sources_chain(llm, chain_type=\"map_reduce\")\n",
|
||||
"\n",
|
||||
"chain = ConversationalRetrievalChain(\n",
|
||||
" retriever=vectara.as_retriever(),\n",
|
||||
" question_generator=question_generator,\n",
|
||||
" combine_docs_chain=doc_chain,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"id": "e2badd21",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat_history = []\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"result = chain({\"question\": query, \"chat_history\": chat_history})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"id": "edb31fe5",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\" The president said that Ketanji Brown Jackson is one of the nation's top legal minds and a former top litigator in private practice.\\nSOURCES: langchain\""
|
||||
]
|
||||
},
|
||||
"execution_count": 27,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"result[\"answer\"]"
|
||||
"bot.invoke(\"Did he mention who she suceeded?\")[\"answer\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -578,157 +209,40 @@
|
||||
"id": "2324cdc6-98bf-4708-b8cd-02a98b1e5b67",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## ConversationalRetrievalChain with streaming to `stdout`\n",
|
||||
"## Chat with streaming\n",
|
||||
"\n",
|
||||
"Output from the chain will be streamed to `stdout` token by token in this example."
|
||||
"Of course the chatbot interface also supports streaming.\n",
|
||||
"Instead of the `invoke` method you simply use `stream`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 28,
|
||||
"id": "2efacec3-2690-4b05-8de3-a32fd2ac3911",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains.conversational_retrieval.prompts import (\n",
|
||||
" CONDENSE_QUESTION_PROMPT,\n",
|
||||
" QA_PROMPT,\n",
|
||||
")\n",
|
||||
"from langchain.chains.llm import LLMChain\n",
|
||||
"from langchain.chains.question_answering import load_qa_chain\n",
|
||||
"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
|
||||
"\n",
|
||||
"# Construct a ConversationalRetrievalChain with a streaming llm for combine docs\n",
|
||||
"# and a separate, non-streaming llm for question generation\n",
|
||||
"llm = OpenAI(temperature=0, openai_api_key=openai_api_key)\n",
|
||||
"streaming_llm = OpenAI(\n",
|
||||
" streaming=True,\n",
|
||||
" callbacks=[StreamingStdOutCallbackHandler()],\n",
|
||||
" temperature=0,\n",
|
||||
" openai_api_key=openai_api_key,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)\n",
|
||||
"doc_chain = load_qa_chain(streaming_llm, chain_type=\"stuff\", prompt=QA_PROMPT)\n",
|
||||
"\n",
|
||||
"bot = ConversationalRetrievalChain(\n",
|
||||
" retriever=vectara.as_retriever(),\n",
|
||||
" combine_docs_chain=doc_chain,\n",
|
||||
" question_generator=question_generator,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 29,
|
||||
"id": "fd6d43f4-7428-44a4-81bc-26fe88a98762",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
" The president said that Ketanji Brown Jackson is one of the nation's top legal minds and a former top litigator in private practice, and that she will continue Justice Breyer's legacy of excellence."
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat_history = []\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"result = bot.invoke({\"question\": query, \"chat_history\": chat_history})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 30,
|
||||
"id": "5ab38978-f3e8-4fa7-808c-c79dec48379a",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
" Ketanji Brown Jackson succeeded Justice Breyer on the United States Supreme Court."
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat_history = [(query, result[\"answer\"])]\n",
|
||||
"query = \"Did he mention who she suceeded\"\n",
|
||||
"result = bot.invoke({\"question\": query, \"chat_history\": chat_history})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f793d56b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## get_chat_history Function\n",
|
||||
"You can also specify a `get_chat_history` function, which can be used to format the chat_history string."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 31,
|
||||
"id": "a7ba9d8c",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def get_chat_history(inputs) -> str:\n",
|
||||
" res = []\n",
|
||||
" for human, ai in inputs:\n",
|
||||
" res.append(f\"Human:{human}\\nAI:{ai}\")\n",
|
||||
" return \"\\n\".join(res)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"bot = ConversationalRetrievalChain.from_llm(\n",
|
||||
" llm, vectara.as_retriever(), get_chat_history=get_chat_history\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 32,
|
||||
"id": "a3e33c0d",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat_history = []\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"result = bot.invoke({\"question\": query, \"chat_history\": chat_history})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 33,
|
||||
"execution_count": 6,
|
||||
"id": "936dc62f",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\" The president said that Ketanji Brown Jackson is one of the nation's top legal minds and a former top litigator in private practice, and that she will continue Justice Breyer's legacy of excellence.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 33,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Judge Ketanji Brown Jackson is a nominee for the United States Supreme Court, known for her legal expertise and experience as a former litigator. She is praised for her potential to continue the legacy of excellence on the Court[1]. While the search results provide information on various topics like innovation, economic growth, and healthcare initiatives, they do not directly address Judge Ketanji Brown Jackson's specific accomplishments. Therefore, I do not have enough information to answer this question."
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"result[\"answer\"]"
|
||||
"output = {}\n",
|
||||
"curr_key = None\n",
|
||||
"for chunk in bot.stream(\"what about her accopmlishments?\"):\n",
|
||||
" for key in chunk:\n",
|
||||
" if key not in output:\n",
|
||||
" output[key] = chunk[key]\n",
|
||||
" else:\n",
|
||||
" output[key] += chunk[key]\n",
|
||||
" if key == \"answer\":\n",
|
||||
" print(chunk[key], end=\"\", flush=True)\n",
|
||||
" curr_key = key"
|
||||
]
|
||||
}
|
||||
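If you only care about the answer text, a simpler sketch of the same streaming loop (assuming the `bot` created above) keeps just the `answer` chunks:

```python
# Stream only the generated answer tokens, ignoring the other chunk keys
for chunk in bot.stream("what about her accomplishments?"):
    if "answer" in chunk:
        print(chunk["answer"], end="", flush=True)
```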
],
|
||||
@@ -748,7 +262,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.6"
|
||||
"version": "3.11.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -1,311 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "559f8e0e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Vectara\n",
|
||||
"\n",
|
||||
">[Vectara](https://vectara.com/) is the trusted GenAI platform that provides an easy-to-use API for document indexing and querying. \n",
|
||||
"\n",
|
||||
"Vectara provides an end-to-end managed service for Retrieval Augmented Generation or [RAG](https://vectara.com/grounded-generation/), which includes:\n",
|
||||
"\n",
|
||||
"1. A way to extract text from document files and chunk them into sentences.\n",
|
||||
"\n",
|
||||
"2. The state-of-the-art [Boomerang](https://vectara.com/how-boomerang-takes-retrieval-augmented-generation-to-the-next-level-via-grounded-generation/) embeddings model. Each text chunk is encoded into a vector embedding using Boomerang, and stored in the Vectara internal knowledge (vector+text) store\n",
|
||||
"\n",
|
||||
"3. A query service that automatically encodes the query into embedding, and retrieves the most relevant text segments (including support for [Hybrid Search](https://docs.vectara.com/docs/api-reference/search-apis/lexical-matching) and [MMR](https://vectara.com/get-diverse-results-and-comprehensive-summaries-with-vectaras-mmr-reranker/))\n",
|
||||
"\n",
|
||||
"4. An option to create [generative summary](https://docs.vectara.com/docs/learn/grounded-generation/grounded-generation-overview), based on the retrieved documents, including citations.\n",
|
||||
"\n",
|
||||
"See the [Vectara API documentation](https://docs.vectara.com/docs/) for more information on how to use the API.\n",
|
||||
"\n",
|
||||
"This notebook shows how to use functionality related to the `Vectara`'s integration with langchain.\n",
|
||||
"Specificaly we will demonstrate how to use chaining with [LangChain's Expression Language](/docs/concepts#langchain-expression-language) and using Vectara's integrated summarization capability."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e97dcf11",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Setup\n",
|
||||
"\n",
|
||||
"You will need a Vectara account to use Vectara with LangChain. To get started, use the following steps:\n",
|
||||
"\n",
|
||||
"1. [Sign up](https://www.vectara.com/integrations/langchain) for a Vectara account if you don't already have one. Once you have completed your sign up you will have a Vectara customer ID. You can find your customer ID by clicking on your name, on the top-right of the Vectara console window.\n",
|
||||
"\n",
|
||||
"2. Within your account you can create one or more corpora. Each corpus represents an area that stores text data upon ingest from input documents. To create a corpus, use the **\"Create Corpus\"** button. You then provide a name to your corpus as well as a description. Optionally you can define filtering attributes and apply some advanced options. If you click on your created corpus, you can see its name and corpus ID right on the top.\n",
|
||||
"\n",
|
||||
"3. Next you'll need to create API keys to access the corpus. Click on the **\"Authorization\"** tab in the corpus view and then the **\"Create API Key\"** button. Give your key a name, and choose whether you want query only or query+index for your key. Click \"Create\" and you now have an active API key. Keep this key confidential. \n",
|
||||
"\n",
|
||||
"To use LangChain with Vectara, you'll need to have these three values: customer ID, corpus ID and api_key.\n",
|
||||
"You can provide those to LangChain in two ways:\n",
|
||||
"\n",
|
||||
"1. Include in your environment these three variables: `VECTARA_CUSTOMER_ID`, `VECTARA_CORPUS_ID` and `VECTARA_API_KEY`.\n",
|
||||
"\n",
|
||||
"> For example, you can set these variables using os.environ and getpass as follows:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"import os\n",
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"os.environ[\"VECTARA_CUSTOMER_ID\"] = getpass.getpass(\"Vectara Customer ID:\")\n",
|
||||
"os.environ[\"VECTARA_CORPUS_ID\"] = getpass.getpass(\"Vectara Corpus ID:\")\n",
|
||||
"os.environ[\"VECTARA_API_KEY\"] = getpass.getpass(\"Vectara API Key:\")\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"2. Add them to the Vectara vectorstore constructor:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"vectorstore = Vectara(\n",
|
||||
" vectara_customer_id=vectara_customer_id,\n",
|
||||
" vectara_corpus_id=vectara_corpus_id,\n",
|
||||
" vectara_api_key=vectara_api_key\n",
|
||||
" )\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "aac7a9a6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.embeddings import FakeEmbeddings\n",
|
||||
"from langchain_community.vectorstores import Vectara\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "875ffb7e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"First we load the state-of-the-union text into Vectara. Note that we use the `from_files` interface which does not require any local processing or chunking - Vectara receives the file content and performs all the necessary pre-processing, chunking and embedding of the file into its knowledge store."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "be0a4973",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"vectara = Vectara.from_files([\"state_of_the_union.txt\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "22a6b953",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We now create a Vectara retriever and specify that:\n",
|
||||
"* It should return only the 3 top Document matches\n",
|
||||
"* For summary, it should use the top 5 results and respond in English"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "19cd2f86",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"summary_config = {\"is_enabled\": True, \"max_results\": 5, \"response_lang\": \"eng\"}\n",
|
||||
"retriever = vectara.as_retriever(\n",
|
||||
" search_kwargs={\"k\": 3, \"summary_config\": summary_config}\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c49284ed",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"When using summarization with Vectara, the retriever responds with a list of `Document` objects:\n",
|
||||
"1. The first `k` documents are the ones that match the query (as we are used to with a standard vector store)\n",
|
||||
"2. With summary enabled, an additional `Document` object is apended, which includes the summary text. This Document has the metadata field `summary` set as True.\n",
|
||||
"\n",
|
||||
"Let's define two utility functions to split those out:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "e5100654",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def get_sources(documents):\n",
|
||||
" return documents[:-1]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def get_summary(documents):\n",
|
||||
" return documents[-1].page_content\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"query_str = \"what did Biden say?\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f2a74368",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we can try a summary response for the query:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "ee4759c4",
|
||||
"metadata": {
|
||||
"scrolled": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'The returned results did not contain sufficient information to be summarized into a useful answer for your query. Please try a different search or restate your query differently.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"(retriever | get_summary).invoke(query_str)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "dd7c4593",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And if we would like to see the sources retrieved from Vectara that were used in this summary (the citations):"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "0eb66034",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='When they came home, many of the world’s fittest and best trained warriors were never the same. Dizziness. \\n\\nA cancer that would put them in a flag-draped coffin. I know. \\n\\nOne of those soldiers was my son Major Beau Biden. We don’t know for sure if a burn pit was the cause of his brain cancer, or the diseases of so many of our troops. But I’m committed to finding out everything we can.', metadata={'lang': 'eng', 'section': '1', 'offset': '34652', 'len': '60', 'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'vectara'}),\n",
|
||||
" Document(page_content='The U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs. We are joining with our European allies to find and seize your yachts your luxury apartments your private jets. We are coming for your ill-begotten gains. And tonight I am announcing that we will join our allies in closing off American air space to all Russian flights – further isolating Russia – and adding an additional squeeze –on their economy. The Ruble has lost 30% of its value.', metadata={'lang': 'eng', 'section': '1', 'offset': '3807', 'len': '42', 'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'vectara'}),\n",
|
||||
" Document(page_content='He rejected repeated efforts at diplomacy. He thought the West and NATO wouldn’t respond. And he thought he could divide us at home. We were ready. Here is what we did. We prepared extensively and carefully.', metadata={'lang': 'eng', 'section': '1', 'offset': '2100', 'len': '42', 'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'vectara'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"(retriever | get_sources).invoke(query_str)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8f16bf8d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Vectara's \"RAG as a service\" does a lot of the heavy lifting in creating question answering or chatbot chains. The integration with LangChain provides the option to use additional capabilities such as query pre-processing like `SelfQueryRetriever` or `MultiQueryRetriever`. Let's look at an example of using the [MultiQueryRetriever](/docs/how_to/MultiQueryRetriever).\n",
|
||||
"\n",
|
||||
"Since MQR uses an LLM we have to set that up - here we choose `ChatOpenAI`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "e14325b9",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"President Biden has made several notable quotes and comments. He expressed a commitment to investigate the potential impact of burn pits on soldiers' health, referencing his son's brain cancer [1]. He emphasized the importance of unity among Americans, urging us to see each other as fellow citizens rather than enemies [2]. Biden also highlighted the need for schools to use funds from the American Rescue Plan to hire teachers and address learning loss, while encouraging community involvement in supporting education [3].\""
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.retrievers.multi_query import MultiQueryRetriever\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(temperature=0)\n",
|
||||
"mqr = MultiQueryRetriever.from_llm(retriever=retriever, llm=llm)\n",
|
||||
"\n",
|
||||
"(mqr | get_summary).invoke(query_str)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "fa14f923",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='When they came home, many of the world’s fittest and best trained warriors were never the same. Dizziness. \\n\\nA cancer that would put them in a flag-draped coffin. I know. \\n\\nOne of those soldiers was my son Major Beau Biden. We don’t know for sure if a burn pit was the cause of his brain cancer, or the diseases of so many of our troops. But I’m committed to finding out everything we can.', metadata={'lang': 'eng', 'section': '1', 'offset': '34652', 'len': '60', 'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'vectara'}),\n",
|
||||
" Document(page_content='The U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs. We are joining with our European allies to find and seize your yachts your luxury apartments your private jets. We are coming for your ill-begotten gains. And tonight I am announcing that we will join our allies in closing off American air space to all Russian flights – further isolating Russia – and adding an additional squeeze –on their economy. The Ruble has lost 30% of its value.', metadata={'lang': 'eng', 'section': '1', 'offset': '3807', 'len': '42', 'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'vectara'}),\n",
|
||||
" Document(page_content='And, if Congress provides the funds we need, we’ll have new stockpiles of tests, masks, and pills ready if needed. I cannot promise a new variant won’t come. But I can promise you we’ll do everything within our power to be ready if it does. Third – we can end the shutdown of schools and businesses. We have the tools we need.', metadata={'lang': 'eng', 'section': '1', 'offset': '24753', 'len': '82', 'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'vectara'}),\n",
|
||||
" Document(page_content='The returned results did not contain sufficient information to be summarized into a useful answer for your query. Please try a different search or restate your query differently.', metadata={'summary': True}),\n",
|
||||
" Document(page_content='Danielle says Heath was a fighter to the very end. He didn’t know how to stop fighting, and neither did she. Through her pain she found purpose to demand we do better. Tonight, Danielle—we are. The VA is pioneering new ways of linking toxic exposures to diseases, already helping more veterans get benefits.', metadata={'lang': 'eng', 'section': '1', 'offset': '35502', 'len': '58', 'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'vectara'}),\n",
|
||||
" Document(page_content='Let’s stop seeing each other as enemies, and start seeing each other for who we really are: Fellow Americans. We can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. I recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. They were responding to a 9-1-1 call when a man shot and killed them with a stolen gun.', metadata={'lang': 'eng', 'section': '1', 'offset': '26312', 'len': '89', 'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'vectara'}),\n",
|
||||
" Document(page_content='The American Rescue Plan gave schools money to hire teachers and help students make up for lost learning. I urge every parent to make sure your school does just that. And we can all play a part—sign up to be a tutor or a mentor. Children were also struggling before the pandemic. Bullying, violence, trauma, and the harms of social media.', metadata={'lang': 'eng', 'section': '1', 'offset': '33227', 'len': '61', 'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'vectara'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"(mqr | get_sources).invoke(query_str)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "16853820",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -20,7 +20,7 @@
|
||||
"\n",
|
||||
"I have used the cloud version of Milvus, thus I need `uri` and `token` as well.\n",
|
||||
"\n",
|
||||
"NOTE: The self-query retriever requires you to have `lark` installed (`pip install lark`). We also need the `pymilvus` package."
|
||||
"NOTE: The self-query retriever requires you to have `lark` installed (`pip install lark`). We also need the `langchain_milvus` package."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -29,16 +29,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet lark"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet pymilvus"
|
||||
"%pip install --upgrade --quiet lark langchain_milvus"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -67,8 +58,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.vectorstores import Milvus\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_milvus.vectorstores import Milvus\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"embeddings = OpenAIEmbeddings()"
|
||||
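Since this notebook uses the cloud version of Milvus, the `uri` and `token` mentioned above are passed through `connection_args`. A minimal sketch (the endpoint and token here are placeholders, not real values):

```python
from langchain_milvus.vectorstores import Milvus
from langchain_openai import OpenAIEmbeddings

# Placeholder credentials for a hosted (Zilliz Cloud) Milvus instance -- replace with your own
MILVUS_URI = "https://<your-cluster>.zillizcloud.com"
MILVUS_TOKEN = "<your-api-token>"

vector_store = Milvus(
    embedding_function=OpenAIEmbeddings(),
    connection_args={"uri": MILVUS_URI, "token": MILVUS_TOKEN},
)
```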
@@ -388,4 +379,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
}
|
||||
@@ -5,15 +5,17 @@
|
||||
"id": "13afcae7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Vectara \n",
|
||||
"# Vectara self-querying \n",
|
||||
"\n",
|
||||
">[Vectara](https://vectara.com/) is the trusted GenAI platform that provides an easy-to-use API for document indexing and querying. \n",
|
||||
">\n",
|
||||
">`Vectara` provides an end-to-end managed service for `Retrieval Augmented Generation` or [RAG](https://vectara.com/grounded-generation/), which includes:\n",
|
||||
">1. A way to `extract text` from document files and `chunk` them into sentences.\n",
|
||||
">2. The state-of-the-art [Boomerang](https://vectara.com/how-boomerang-takes-retrieval-augmented-generation-to-the-next-level-via-grounded-generation/) embeddings model. Each text chunk is encoded into a vector embedding using `Boomerang`, and stored in the Vectara internal knowledge (vector+text) store\n",
|
||||
">3. A query service that automatically encodes the query into embedding, and retrieves the most relevant text segments (including support for [Hybrid Search](https://docs.vectara.com/docs/api-reference/search-apis/lexical-matching) and [MMR](https://vectara.com/get-diverse-results-and-comprehensive-summaries-with-vectaras-mmr-reranker/))\n",
|
||||
">4. An option to create [generative summary](https://docs.vectara.com/docs/learn/grounded-generation/grounded-generation-overview), based on the retrieved documents, including citations.\n",
|
||||
"[Vectara](https://vectara.com/) provides a Trusted Generative AI platform, allowing organizations to rapidly create a ChatGPT-like experience (an AI assistant) which is grounded in the data, documents, and knowledge that they have (technically, it is Retrieval-Augmented-Generation-as-a-service). \n",
|
||||
"\n",
|
||||
"Vectara serverless RAG-as-a-service provides all the components of RAG behind an easy-to-use API, including:\n",
|
||||
"1. A way to extract text from files (PDF, PPT, DOCX, etc)\n",
|
||||
"2. ML-based chunking that provides state of the art performance.\n",
|
||||
"3. The [Boomerang](https://vectara.com/how-boomerang-takes-retrieval-augmented-generation-to-the-next-level-via-grounded-generation/) embeddings model.\n",
|
||||
"4. Its own internal vector database where text chunks and embedding vectors are stored.\n",
|
||||
"5. A query service that automatically encodes the query into embedding, and retrieves the most relevant text segments (including support for [Hybrid Search](https://docs.vectara.com/docs/api-reference/search-apis/lexical-matching) and [MMR](https://vectara.com/get-diverse-results-and-comprehensive-summaries-with-vectaras-mmr-reranker/))\n",
|
||||
"7. An LLM to for creating a [generative summary](https://docs.vectara.com/docs/learn/grounded-generation/grounded-generation-overview), based on the retrieved documents (context), including citations.\n",
|
||||
"\n",
|
||||
"See the [Vectara API documentation](https://docs.vectara.com/docs/) for more information on how to use the API.\n",
|
||||
"\n",
|
||||
@@ -25,19 +27,19 @@
|
||||
"id": "68e75fb9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Setup\n",
|
||||
"# Getting Started\n",
|
||||
"\n",
|
||||
"You will need a `Vectara` account to use `Vectara` with `LangChain`. To get started, use the following steps (see our [quickstart](https://docs.vectara.com/docs/quickstart) guide):\n",
|
||||
"1. [Sign up](https://console.vectara.com/signup) for a `Vectara` account if you don't already have one. Once you have completed your sign up you will have a Vectara customer ID. You can find your customer ID by clicking on your name, on the top-right of the Vectara console window.\n",
|
||||
"2. Within your account you can create one or more corpora. Each corpus represents an area that stores text data upon ingesting from input documents. To create a corpus, use the **\"Create Corpus\"** button. You then provide a name to your corpus as well as a description. Optionally you can define filtering attributes and apply some advanced options. If you click on your created corpus, you can see its name and corpus ID right on the top.\n",
|
||||
"3. Next you'll need to create API keys to access the corpus. Click on the **\"Authorization\"** tab in the corpus view and then the **\"Create API Key\"** button. Give your key a name, and choose whether you want query only or query+index for your key. Click \"Create\" and you now have an active API key. Keep this key confidential. \n",
|
||||
"To get started, use the following steps:\n",
|
||||
"1. If you don't already have one, [Sign up](https://www.vectara.com/integrations/langchain) for your free Vectara account. Once you have completed your sign up you will have a Vectara customer ID. You can find your customer ID by clicking on your name, on the top-right of the Vectara console window.\n",
|
||||
"2. Within your account you can create one or more corpora. Each corpus represents an area that stores text data upon ingest from input documents. To create a corpus, use the **\"Create Corpus\"** button. You then provide a name to your corpus as well as a description. Optionally you can define filtering attributes and apply some advanced options. If you click on your created corpus, you can see its name and corpus ID right on the top.\n",
|
||||
"3. Next you'll need to create API keys to access the corpus. Click on the **\"Access Control\"** tab in the corpus view and then the **\"Create API Key\"** button. Give your key a name, and choose whether you want query-only or query+index for your key. Click \"Create\" and you now have an active API key. Keep this key confidential. \n",
|
||||
"\n",
|
||||
"To use LangChain with Vectara, you need three values: customer ID, corpus ID and api_key.\n",
|
||||
"To use LangChain with Vectara, you'll need to have these three values: `customer ID`, `corpus ID` and `api_key`.\n",
|
||||
"You can provide those to LangChain in two ways:\n",
|
||||
"\n",
|
||||
"1. Include in your environment these three variables: `VECTARA_CUSTOMER_ID`, `VECTARA_CORPUS_ID` and `VECTARA_API_KEY`.\n",
|
||||
"\n",
|
||||
"> For example, you can set these variables using `os.environ` and `getpass` as follows:\n",
|
||||
" For example, you can set these variables using os.environ and getpass as follows:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"import os\n",
|
||||
@@ -48,17 +50,18 @@
|
||||
"os.environ[\"VECTARA_API_KEY\"] = getpass.getpass(\"Vectara API Key:\")\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"1. Provide them as arguments when creating the `Vectara` vectorstore object:\n",
|
||||
"2. Add them to the `Vectara` vectorstore constructor:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"vectorstore = Vectara(\n",
|
||||
"vectara = Vectara(\n",
|
||||
" vectara_customer_id=vectara_customer_id,\n",
|
||||
" vectara_corpus_id=vectara_corpus_id,\n",
|
||||
" vectara_api_key=vectara_api_key\n",
|
||||
" )\n",
|
||||
"```\n",
|
||||
"In this notebook we assume they are provided in the environment.\n",
|
||||
"\n",
|
||||
"**Note:** The self-query retriever requires you to have `lark` installed (`pip install lark`). "
|
||||
"**Notes:** The self-query retriever requires you to have `lark` installed (`pip install lark`). "
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -68,34 +71,44 @@
|
||||
"source": [
|
||||
"## Connecting to Vectara from LangChain\n",
|
||||
"\n",
|
||||
"In this example, we assume that you've created an account and a corpus, and added your VECTARA_CUSTOMER_ID, VECTARA_CORPUS_ID and VECTARA_API_KEY (created with permissions for both indexing and query) as environment variables.\n",
|
||||
"In this example, we assume that you've created an account and a corpus, and added your `VECTARA_CUSTOMER_ID`, `VECTARA_CORPUS_ID` and `VECTARA_API_KEY` (created with permissions for both indexing and query) as environment variables.\n",
|
||||
"\n",
|
||||
"The corpus has 4 fields defined as metadata for filtering: year, director, rating, and genre\n"
|
||||
"We further assume the corpus has 4 fields defined as filterable metadata attributes: `year`, `director`, `rating`, and `genre`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "9d3aa44f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"VECTARA_API_KEY\"] = \"<YOUR_VECTARA_API_KEY>\"\n",
|
||||
"os.environ[\"VECTARA_CORPUS_ID\"] = \"<YOUR_VECTARA_CORPUS_ID>\"\n",
|
||||
"os.environ[\"VECTARA_CUSTOMER_ID\"] = \"<YOUR_VECTARA_CUSTOMER_ID>\"\n",
|
||||
"\n",
|
||||
"from langchain.chains.query_constructor.base import AttributeInfo\n",
|
||||
"from langchain.retrievers.self_query.base import SelfQueryRetriever\n",
|
||||
"from langchain.schema import Document\n",
|
||||
"from langchain_community.vectorstores import Vectara\n",
|
||||
"from langchain_openai.chat_models import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "13a6be33-de3c-4628-acc8-b94102c275b7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Dataset\n",
|
||||
"\n",
|
||||
"We first define an example dataset of movie, and upload those to the corpus, along with the metadata:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "cb4a5787",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import ConversationalRetrievalChain\n",
|
||||
"from langchain.chains.query_constructor.base import AttributeInfo\n",
|
||||
"from langchain.retrievers.self_query.base import SelfQueryRetriever\n",
|
||||
"from langchain_community.document_loaders import TextLoader\n",
|
||||
"from langchain_community.embeddings import FakeEmbeddings\n",
|
||||
"from langchain_community.vectorstores import Vectara\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"from langchain_text_splitters import CharacterTextSplitter"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "bcbe04d9",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -136,11 +149,7 @@
|
||||
"\n",
|
||||
"vectara = Vectara()\n",
|
||||
"for doc in docs:\n",
|
||||
" vectara.add_texts(\n",
|
||||
" [doc.page_content],\n",
|
||||
" embedding=FakeEmbeddings(size=768),\n",
|
||||
" doc_metadata=doc.metadata,\n",
|
||||
" )"
|
||||
" vectara.add_texts([doc.page_content], doc_metadata=doc.metadata)"
|
||||
]
|
||||
},
|
||||
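The full `docs` list is elided from this hunk. Below is a minimal sketch, reconstructed from the query outputs shown later in this notebook, of how two of the movie documents with filterable metadata might be defined before the upload loop above; it is illustrative only, not the notebook's complete dataset.

```python
# Sketch only: two of the example movie documents with filterable metadata.
# The actual notebook defines a longer list; the metadata keys match the
# corpus filtering attributes (year, director, rating, genre).
from langchain_core.documents import Document

docs = [
    Document(
        page_content="A bunch of scientists bring back dinosaurs and mayhem breaks loose",
        metadata={"year": 1993, "rating": 7.7, "genre": "science fiction"},
    ),
    Document(
        page_content="Toys come alive and have a blast doing so",
        metadata={"year": 1995, "genre": "animated"},
    ),
]
```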
{
|
||||
@@ -148,23 +157,21 @@
|
||||
"id": "5ecaab6d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Creating our self-querying retriever\n",
|
||||
"Now we can instantiate our retriever. To do this we'll need to provide some information upfront about the metadata fields that our documents support and a short description of the document contents."
|
||||
"## Creating the self-querying retriever\n",
|
||||
"Now we can instantiate our retriever. To do this we'll need to provide some information upfront about the metadata fields that our documents support and a short description of the document contents.\n",
|
||||
"\n",
|
||||
"We then provide an llm (in this case OpenAI) and the `vectara` vectorstore as arguments:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 3,
|
||||
"id": "86e34dbf",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains.query_constructor.base import AttributeInfo\n",
|
||||
"from langchain.retrievers.self_query.base import SelfQueryRetriever\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"metadata_field_info = [\n",
|
||||
" AttributeInfo(\n",
|
||||
" name=\"genre\",\n",
|
||||
@@ -186,7 +193,7 @@
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"document_content_description = \"Brief summary of a movie\"\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"llm = ChatOpenAI(temperature=0, model=\"gpt-4o\", max_tokens=4069)\n",
|
||||
"retriever = SelfQueryRetriever.from_llm(\n",
|
||||
" llm, vectara, document_content_description, metadata_field_info, verbose=True\n",
|
||||
")"
|
||||
@@ -197,13 +204,13 @@
|
||||
"id": "ea9df8d4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Testing it out\n",
|
||||
"## Self-retrieval Queries\n",
|
||||
"And now we can try actually using our retriever!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 4,
|
||||
"id": "38a126e9",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -211,26 +218,26 @@
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'lang': 'eng', 'offset': '0', 'len': '66', 'year': '1993', 'rating': '7.7', 'genre': 'science fiction', 'source': 'langchain'}),\n",
|
||||
" Document(page_content='Toys come alive and have a blast doing so', metadata={'lang': 'eng', 'offset': '0', 'len': '41', 'year': '1995', 'genre': 'animated', 'source': 'langchain'}),\n",
|
||||
" Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'lang': 'eng', 'offset': '0', 'len': '116', 'year': '2006', 'director': 'Satoshi Kon', 'rating': '8.6', 'source': 'langchain'}),\n",
|
||||
" Document(page_content='Leo DiCaprio gets lost in a dream within a dream within a dream within a ...', metadata={'lang': 'eng', 'offset': '0', 'len': '76', 'year': '2010', 'director': 'Christopher Nolan', 'rating': '8.2', 'source': 'langchain'}),\n",
|
||||
" Document(page_content='Toys come alive and have a blast doing so', metadata={'lang': 'eng', 'offset': '0', 'len': '41', 'year': '1995', 'genre': 'animated', 'source': 'langchain'}),\n",
|
||||
" Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'lang': 'eng', 'offset': '0', 'len': '60', 'year': '1979', 'rating': '9.9', 'director': 'Andrei Tarkovsky', 'genre': 'science fiction', 'source': 'langchain'}),\n",
|
||||
" Document(page_content='A bunch of normal-sized women are supremely wholesome and some men pine after them', metadata={'lang': 'eng', 'offset': '0', 'len': '82', 'year': '2019', 'director': 'Greta Gerwig', 'rating': '8.3', 'source': 'langchain'}),\n",
|
||||
" Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'lang': 'eng', 'offset': '0', 'len': '60', 'year': '1979', 'rating': '9.9', 'director': 'Andrei Tarkovsky', 'genre': 'science fiction', 'source': 'langchain'})]"
|
||||
" Document(page_content='Leo DiCaprio gets lost in a dream within a dream within a dream within a ...', metadata={'lang': 'eng', 'offset': '0', 'len': '76', 'year': '2010', 'director': 'Christopher Nolan', 'rating': '8.2', 'source': 'langchain'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# This example only specifies a relevant query\n",
|
||||
"retriever.invoke(\"What are some movies about dinosaurs\")"
|
||||
"retriever.invoke(\"What are movies about scientists\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 5,
|
||||
"id": "fc3f1e6e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -241,7 +248,7 @@
|
||||
" Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'lang': 'eng', 'offset': '0', 'len': '60', 'year': '1979', 'rating': '9.9', 'director': 'Andrei Tarkovsky', 'genre': 'science fiction', 'source': 'langchain'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -253,7 +260,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 6,
|
||||
"id": "b19d4da0",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -263,7 +270,7 @@
|
||||
"[Document(page_content='A bunch of normal-sized women are supremely wholesome and some men pine after them', metadata={'lang': 'eng', 'offset': '0', 'len': '82', 'year': '2019', 'director': 'Greta Gerwig', 'rating': '8.3', 'source': 'langchain'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -275,17 +282,18 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 7,
|
||||
"id": "f900e40e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'lang': 'eng', 'offset': '0', 'len': '60', 'year': '1979', 'rating': '9.9', 'director': 'Andrei Tarkovsky', 'genre': 'science fiction', 'source': 'langchain'})]"
|
||||
"[Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'lang': 'eng', 'offset': '0', 'len': '116', 'year': '2006', 'director': 'Satoshi Kon', 'rating': '8.6', 'source': 'langchain'}),\n",
|
||||
" Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'lang': 'eng', 'offset': '0', 'len': '60', 'year': '1979', 'rating': '9.9', 'director': 'Andrei Tarkovsky', 'genre': 'science fiction', 'source': 'langchain'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -297,17 +305,18 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": 8,
|
||||
"id": "12a51522",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='Toys come alive and have a blast doing so', metadata={'lang': 'eng', 'offset': '0', 'len': '41', 'year': '1995', 'genre': 'animated', 'source': 'langchain'})]"
|
||||
"[Document(page_content='Toys come alive and have a blast doing so', metadata={'lang': 'eng', 'offset': '0', 'len': '41', 'year': '1995', 'genre': 'animated', 'source': 'langchain'}),\n",
|
||||
" Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'lang': 'eng', 'offset': '0', 'len': '66', 'year': '1993', 'rating': '7.7', 'genre': 'science fiction', 'source': 'langchain'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -333,7 +342,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": 9,
|
||||
"id": "bff36b88-b506-4877-9c63-e5a1a8d78e64",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -350,9 +359,17 @@
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "00e8baad-a9d7-4498-bd8d-ca41d0691386",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This is cool, we can include the number of results we would like to see in the query and the self retriever would correctly understand it. For example, let's look for "
|
||||
]
|
||||
},
|
||||
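For the retriever to honor a result count expressed in natural language, LangChain's self-query retriever exposes an `enable_limit` flag. The notebook cell that sets this up is elided from the hunk above; the sketch below assumes it simply rebuilds the retriever with that flag, reusing the objects defined earlier.

```python
# Sketch: rebuild the retriever with enable_limit=True so a query such as
# "what are two movies with a rating above 8.5" returns at most two results.
retriever = SelfQueryRetriever.from_llm(
    llm,
    vectara,
    document_content_description,
    metadata_field_info,
    enable_limit=True,
    verbose=True,
)
```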
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"execution_count": 10,
|
||||
"id": "2758d229-4f97-499c-819f-888acaf8ee10",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -361,19 +378,27 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'lang': 'eng', 'offset': '0', 'len': '66', 'year': '1993', 'rating': '7.7', 'genre': 'science fiction', 'source': 'langchain'}),\n",
|
||||
" Document(page_content='Toys come alive and have a blast doing so', metadata={'lang': 'eng', 'offset': '0', 'len': '41', 'year': '1995', 'genre': 'animated', 'source': 'langchain'})]"
|
||||
"[Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'lang': 'eng', 'offset': '0', 'len': '116', 'year': '2006', 'director': 'Satoshi Kon', 'rating': '8.6', 'source': 'langchain'}),\n",
|
||||
" Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'lang': 'eng', 'offset': '0', 'len': '60', 'year': '1979', 'rating': '9.9', 'director': 'Andrei Tarkovsky', 'genre': 'science fiction', 'source': 'langchain'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# This example only specifies a relevant query\n",
|
||||
"retriever.invoke(\"what are two movies about dinosaurs\")"
|
||||
"retriever.invoke(\"what are two movies with a rating above 8.5\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "ed4b9dbc-e3cd-442d-b108-705295f51fa1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -392,7 +417,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.12"
|
||||
"version": "3.11.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
139
docs/docs/integrations/stores/elasticsearch.ipynb
Normal file
@@ -0,0 +1,139 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Elasticsearch \n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ElasticsearchEmbeddingsCache\n",
|
||||
"\n",
|
||||
"The `ElasticsearchEmbeddingsCache` is a `ByteStore` implementation that uses your Elasticsearch instance for efficient storage and retrieval of embeddings.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"First install the LangChain integration with Elasticsearch."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -U langchain-elasticsearch"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": "it can be instantiated using `CacheBackedEmbeddings.from_bytes_store` method."
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings import CacheBackedEmbeddings\n",
|
||||
"from langchain_elasticsearch import ElasticsearchEmbeddingsCache\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"underlying_embeddings = OpenAIEmbeddings(model=\"text-embedding-3-small\")\n",
|
||||
"\n",
|
||||
"store = ElasticsearchEmbeddingsCache(\n",
|
||||
" es_url=\"http://localhost:9200\",\n",
|
||||
" index_name=\"llm-chat-cache\",\n",
|
||||
" metadata={\"project\": \"my_chatgpt_project\"},\n",
|
||||
" namespace=\"my_chatgpt_project\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"embeddings = CacheBackedEmbeddings.from_bytes_store(\n",
|
||||
" underlying_embeddings=OpenAIEmbeddings(),\n",
|
||||
" document_embedding_cache=store,\n",
|
||||
" query_embedding_cache=store,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
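As a quick sanity check (not part of the original notebook), the cache-backed embedder can be used like any other embeddings object: the first call computes and stores the vectors in Elasticsearch, and repeated calls for the same inputs are served from the cache.

```python
# Embed a couple of texts; re-running this should hit the Elasticsearch-backed
# cache instead of calling the underlying OpenAI model again.
vectors = embeddings.embed_documents(["hello world", "goodbye world"])
print(len(vectors), len(vectors[0]))
```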
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The index_name parameter can also accept aliases. This allows to use the ILM: Manage the index lifecycle that we suggest to consider for managing retention and controlling cache growth.\n",
|
||||
"\n",
|
||||
"Look at the class docstring for all parameters."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Index the generated vectors\n",
|
||||
"The cached vectors won't be searchable by default. The developer can customize the building of the Elasticsearch document in order to add indexed vector field.\n",
|
||||
"\n",
|
||||
"This can be done by subclassing end overriding methods. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import Any, Dict, List\n",
|
||||
"\n",
|
||||
"from langchain_elasticsearch import ElasticsearchEmbeddingsCache\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class SearchableElasticsearchStore(ElasticsearchEmbeddingsCache):\n",
|
||||
" @property\n",
|
||||
" def mapping(self) -> Dict[str, Any]:\n",
|
||||
" mapping = super().mapping\n",
|
||||
" mapping[\"mappings\"][\"properties\"][\"vector\"] = {\n",
|
||||
" \"type\": \"dense_vector\",\n",
|
||||
" \"dims\": 1536,\n",
|
||||
" \"index\": True,\n",
|
||||
" \"similarity\": \"dot_product\",\n",
|
||||
" }\n",
|
||||
" return mapping\n",
|
||||
"\n",
|
||||
" def build_document(self, llm_input: str, vector: List[float]) -> Dict[str, Any]:\n",
|
||||
" body = super().build_document(llm_input, vector)\n",
|
||||
" body[\"vector\"] = vector\n",
|
||||
" return body"
|
||||
]
|
||||
},
|
||||
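A sketch of how the subclass might then be used in place of the plain cache; the connection values are the same illustrative ones used earlier on this page, not required settings.

```python
# Use the subclass so cached vectors are also indexed as a searchable
# dense_vector field in Elasticsearch.
searchable_store = SearchableElasticsearchStore(
    es_url="http://localhost:9200",
    index_name="llm-chat-cache",
    namespace="my_chatgpt_project",
)

embeddings = CacheBackedEmbeddings.from_bytes_store(
    underlying_embeddings=underlying_embeddings,
    document_embedding_cache=searchable_store,
    query_embedding_cache=searchable_store,
)
```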
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": "When overriding the mapping and the document building, please only make additive modifications, keeping the base mapping intact."
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -42,6 +42,12 @@
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "markdown",
|
||||
"source": "Note that you need to pass `query_instruction=\"\"` for `model_name=\"BAAI/bge-m3\"` see [FAQ BGE M3](https://huggingface.co/BAAI/bge-m3#faq). ",
|
||||
"id": "f35d54e529c4cb77"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
|
||||
@@ -150,7 +150,7 @@
|
||||
" username=\"PASTE YOUR USERNAME HERE\",\n",
|
||||
" password=\"PASTE YOUR PASSWORD HERE\",\n",
|
||||
" instance_id=\"openshift\",\n",
|
||||
" version=\"5.0\",\n",
|
||||
" version=\"4.8\",\n",
|
||||
" project_id=\"PASTE YOUR PROJECT_ID HERE\",\n",
|
||||
" params=embed_params,\n",
|
||||
")"
|
||||
|
||||
101
docs/docs/integrations/text_embedding/ipex_llm.ipynb
Normal file
@@ -0,0 +1,101 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Local BGE Embeddings with IPEX-LLM on Intel CPU\n",
|
||||
"\n",
|
||||
"> [IPEX-LLM](https://github.com/intel-analytics/ipex-llm) is a PyTorch library for running LLM on Intel CPU and GPU (e.g., local PC with iGPU, discrete GPU such as Arc, Flex and Max) with very low latency.\n",
|
||||
"\n",
|
||||
"This example goes over how to use LangChain to conduct embedding tasks with `ipex-llm` optimizations on Intel CPU. This would be helpful in applications such as RAG, document QA, etc.\n",
|
||||
"\n",
|
||||
"## Setup"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Install IPEX-LLM for optimizations on Intel CPU, as well as `sentence-transformers`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --pre --upgrade ipex-llm[all] --extra-index-url https://download.pytorch.org/whl/cpu\n",
|
||||
"%pip install sentence-transformers"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"> **Note**\n",
|
||||
">\n",
|
||||
"> For Windows users, `--extra-index-url https://download.pytorch.org/whl/cpu` when install `ipex-llm` is not required.\n",
|
||||
"\n",
|
||||
"## Basic Usage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.embeddings import IpexLLMBgeEmbeddings\n",
|
||||
"\n",
|
||||
"embedding_model = IpexLLMBgeEmbeddings(\n",
|
||||
" model_name=\"BAAI/bge-large-en-v1.5\",\n",
|
||||
" model_kwargs={},\n",
|
||||
" encode_kwargs={\"normalize_embeddings\": True},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"API Reference\n",
|
||||
"- [IpexLLMBgeEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.ipex_llm.IpexLLMBgeEmbeddings.html)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"sentence = \"IPEX-LLM is a PyTorch library for running LLM on Intel CPU and GPU (e.g., local PC with iGPU, discrete GPU such as Arc, Flex and Max) with very low latency.\"\n",
|
||||
"query = \"What is IPEX-LLM?\"\n",
|
||||
"\n",
|
||||
"text_embeddings = embedding_model.embed_documents([sentence, query])\n",
|
||||
"print(f\"text_embeddings[0][:10]: {text_embeddings[0][:10]}\")\n",
|
||||
"print(f\"text_embeddings[1][:10]: {text_embeddings[1][:10]}\")\n",
|
||||
"\n",
|
||||
"query_embedding = embedding_model.embed_query(query)\n",
|
||||
"print(f\"query_embedding[:10]: {query_embedding[:10]}\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
164
docs/docs/integrations/text_embedding/ipex_llm_gpu.ipynb
Normal file
@@ -0,0 +1,164 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Local BGE Embeddings with IPEX-LLM on Intel GPU\n",
|
||||
"\n",
|
||||
"> [IPEX-LLM](https://github.com/intel-analytics/ipex-llm) is a PyTorch library for running LLM on Intel CPU and GPU (e.g., local PC with iGPU, discrete GPU such as Arc, Flex and Max) with very low latency.\n",
|
||||
"\n",
|
||||
"This example goes over how to use LangChain to conduct embedding tasks with `ipex-llm` optimizations on Intel GPU. This would be helpful in applications such as RAG, document QA, etc.\n",
|
||||
"\n",
|
||||
"> **Note**\n",
|
||||
">\n",
|
||||
"> It is recommended that only Windows users with Intel Arc A-Series GPU (except for Intel Arc A300-Series or Pro A60) run this Jupyter notebook directly. For other cases (e.g. Linux users, Intel iGPU, etc.), it is recommended to run the code with Python scripts in terminal for best experiences.\n",
|
||||
"\n",
|
||||
"## Install Prerequisites\n",
|
||||
"To benefit from IPEX-LLM on Intel GPUs, there are several prerequisite steps for tools installation and environment preparation.\n",
|
||||
"\n",
|
||||
"If you are a Windows user, visit the [Install IPEX-LLM on Windows with Intel GPU Guide](https://ipex-llm.readthedocs.io/en/latest/doc/LLM/Quickstart/install_windows_gpu.html), and follow [Install Prerequisites](https://ipex-llm.readthedocs.io/en/latest/doc/LLM/Quickstart/install_windows_gpu.html#install-prerequisites) to update GPU driver (optional) and install Conda.\n",
|
||||
"\n",
|
||||
"If you are a Linux user, visit the [Install IPEX-LLM on Linux with Intel GPU](https://ipex-llm.readthedocs.io/en/latest/doc/LLM/Quickstart/install_linux_gpu.html), and follow [**Install Prerequisites**](https://ipex-llm.readthedocs.io/en/latest/doc/LLM/Quickstart/install_linux_gpu.html#install-prerequisites) to install GPU driver, Intel® oneAPI Base Toolkit 2024.0, and Conda.\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"After the prerequisites installation, you should have created a conda environment with all prerequisites installed. **Start the jupyter service in this conda environment**:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Install IPEX-LLM for optimizations on Intel GPU, as well as `sentence-transformers`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/\n",
|
||||
"%pip install sentence-transformers"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"> **Note**\n",
|
||||
">\n",
|
||||
"> You can also use `https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/` as the extra-indel-url.\n",
|
||||
"\n",
|
||||
"## Runtime Configuration\n",
|
||||
"\n",
|
||||
"For optimal performance, it is recommended to set several environment variables based on your device:\n",
|
||||
"\n",
|
||||
"### For Windows Users with Intel Core Ultra integrated GPU"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"SYCL_CACHE_PERSISTENT\"] = \"1\"\n",
|
||||
"os.environ[\"BIGDL_LLM_XMX_DISABLED\"] = \"1\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### For Windows Users with Intel Arc A-Series GPU"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"SYCL_CACHE_PERSISTENT\"] = \"1\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"> **Note**\n",
|
||||
">\n",
|
||||
"> For the first time that each model runs on Intel iGPU/Intel Arc A300-Series or Pro A60, it may take several minutes to compile.\n",
|
||||
">\n",
|
||||
"> For other GPU type, please refer to [here](https://ipex-llm.readthedocs.io/en/latest/doc/LLM/Overview/install_gpu.html#runtime-configuration) for Windows users, and [here](https://ipex-llm.readthedocs.io/en/latest/doc/LLM/Overview/install_gpu.html#id5) for Linux users.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Basic Usage\n",
|
||||
"\n",
|
||||
"Setting `device` to `\"xpu\"` in `model_kwargs` when initializing `IpexLLMBgeEmbeddings` will put the embedding model on Intel GPU and benefit from IPEX-LLM optimizations:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.embeddings import IpexLLMBgeEmbeddings\n",
|
||||
"\n",
|
||||
"embedding_model = IpexLLMBgeEmbeddings(\n",
|
||||
" model_name=\"BAAI/bge-large-en-v1.5\",\n",
|
||||
" model_kwargs={\"device\": \"xpu\"},\n",
|
||||
" encode_kwargs={\"normalize_embeddings\": True},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"API Reference\n",
|
||||
"- [IpexLLMBgeEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.ipex_llm.IpexLLMBgeEmbeddings.html)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"sentence = \"IPEX-LLM is a PyTorch library for running LLM on Intel CPU and GPU (e.g., local PC with iGPU, discrete GPU such as Arc, Flex and Max) with very low latency.\"\n",
|
||||
"query = \"What is IPEX-LLM?\"\n",
|
||||
"\n",
|
||||
"text_embeddings = embedding_model.embed_documents([sentence, query])\n",
|
||||
"print(f\"text_embeddings[0][:10]: {text_embeddings[0][:10]}\")\n",
|
||||
"print(f\"text_embeddings[1][:10]: {text_embeddings[1][:10]}\")\n",
|
||||
"\n",
|
||||
"query_embedding = embedding_model.embed_query(query)\n",
|
||||
"print(f\"query_embedding[:10]: {query_embedding[:10]}\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -10,6 +10,16 @@
|
||||
"Let's load the Jina Embedding class."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2f0a1567-6273-47a3-b19d-c30af2470810",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pip install -U langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
@@ -17,7 +27,11 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.embeddings import JinaEmbeddings"
|
||||
"import requests\n",
|
||||
"from langchain_community.embeddings import JinaEmbeddings\n",
|
||||
"from numpy import dot\n",
|
||||
"from numpy.linalg import norm\n",
|
||||
"from PIL import Image"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -27,9 +41,11 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"embeddings = JinaEmbeddings(\n",
|
||||
"text_embeddings = JinaEmbeddings(\n",
|
||||
" jina_api_key=\"jina_*\", model_name=\"jina-embeddings-v2-base-en\"\n",
|
||||
")"
|
||||
")\n",
|
||||
"\n",
|
||||
"image_embeddings = JinaEmbeddings(jina_api_key=\"jina_*\", model_name=\"jina-clip-v1\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -39,7 +55,15 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"text = \"This is a test document.\""
|
||||
"text = \"This is a test document.\"\n",
|
||||
"\n",
|
||||
"image = \"https://avatars.githubusercontent.com/u/126733545?v=4\"\n",
|
||||
"\n",
|
||||
"description = \"Logo of a parrot and a chain on green background\"\n",
|
||||
"\n",
|
||||
"im = Image.open(requests.get(image, stream=True).raw)\n",
|
||||
"print(\"Image:\")\n",
|
||||
"display(im)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -49,7 +73,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query_result = embeddings.embed_query(text)"
|
||||
"query_result = text_embeddings.embed_query(text)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -69,7 +93,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"doc_result = embeddings.embed_documents([text])"
|
||||
"doc_result = text_embeddings.embed_documents([text])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -81,6 +105,76 @@
|
||||
"source": [
|
||||
"print(doc_result)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "ac2aace1-27af-4c4f-96f8-8e8b20d95b98",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"image_result = image_embeddings.embed_images([image])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "6687808c-1977-4128-a960-888bb82c46e1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(image_result)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2844ef7c-cf9b-4e28-b627-09887aaa0a6d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"description_result = image_embeddings.embed_documents([description])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "23372332-2ea3-4e4a-abc8-8307d45ebc95",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(description_result)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "08d3ba5e-8957-4b10-97e3-40359ab165a6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"cosine_similarity = dot(image_result[0], description_result[0]) / (\n",
|
||||
" norm(image_result[0]) * norm(description_result[0])\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "6be56ff9-774b-4347-a5cf-57d8db9e2cf2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(cosine_similarity)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7f280807-a02b-4d4e-8ebd-01be33117999",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -99,7 +193,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.11"
|
||||
"version": "3.12.2"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Oracle Cloud Infrastructure Generative AI"
|
||||
"# Oracle Cloud Infrastructure Generative AI"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -7,7 +7,27 @@
|
||||
"source": [
|
||||
"# Ollama\n",
|
||||
"\n",
|
||||
"Let's load the Ollama Embeddings class."
|
||||
"\"Ollama supports embedding models, making it possible to build retrieval augmented generation (RAG) applications that combine text prompts with existing documents or other data.\" Learn more about the introduction to [Ollama Embeddings](https://ollama.com/blog/embedding-models) in the blog post.\n",
|
||||
"\n",
|
||||
"To use Ollama Embeddings, first, install [LangChain Community](https://pypi.org/project/langchain-community/) package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "854d6a2e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "54fbb4cd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Load the Ollama Embeddings class:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -17,26 +37,12 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.embeddings import OllamaEmbeddings"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "2c66e5da",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"embeddings = OllamaEmbeddings()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "01370375",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.embeddings import OllamaEmbeddings\n",
|
||||
"\n",
|
||||
"embeddings = (\n",
|
||||
" OllamaEmbeddings()\n",
|
||||
") # by default, uses llama2. Run `ollama pull llama2` to pull down the model\n",
|
||||
"\n",
|
||||
"text = \"This is a test document.\""
|
||||
]
|
||||
},
|
||||
@@ -105,7 +111,13 @@
|
||||
"id": "bb61bbeb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's load the Ollama Embeddings class with smaller model (e.g. llama:7b). Note: See other supported models [https://ollama.ai/library](https://ollama.ai/library)"
|
||||
"### Embedding Models\n",
|
||||
"\n",
|
||||
"Ollama has embedding models, that are lightweight enough for use in embeddings, with the smallest about the size of 25Mb. See some of the available [embedding models from Ollama](https://ollama.com/blog/embedding-models).\n",
|
||||
"\n",
|
||||
"Let's load the Ollama Embeddings class with smaller model (e.g. `mxbai-embed-large`). \n",
|
||||
"\n",
|
||||
"> Note: See other supported models [https://ollama.ai/library](https://ollama.ai/library)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -115,26 +127,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"embeddings = OllamaEmbeddings(model=\"llama2:7b\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "14aefb64",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"text = \"This is a test document.\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "3c39ed33",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"embeddings = OllamaEmbeddings(model=\"mxbai-embed-large\")\n",
|
||||
"text = \"This is a test document.\"\n",
|
||||
"query_result = embeddings.embed_query(text)"
|
||||
]
|
||||
},
|
||||
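For completeness, document embeddings work the same way with the smaller model; a short sketch reusing the `embeddings` and `text` objects from the cell above.

```python
# Embed a small batch of documents with the mxbai-embed-large model.
doc_result = embeddings.embed_documents([text])
print(doc_result[0][:5])
```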
|
||||
@@ -1,5 +1,19 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "ae8077b8",
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "raw"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"---\n",
|
||||
"keywords: [openaiembeddings]\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "278b6c63",
|
||||
|
||||
@@ -193,13 +193,6 @@
|
||||
"Oracle AI Vector Search provides multiple methods for generating embeddings, utilizing either locally hosted ONNX models or third-party APIs. For comprehensive instructions on configuring these alternatives, please refer to the [Oracle AI Vector Search Guide](https://docs.oracle.com/en/database/oracle/oracle-database/23/arpls/dbms_vector_chain1.html#GUID-C6439E94-4E86-4ECD-954E-4B73D53579DE)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"***Note:*** Currently, OracleEmbeddings processes each embedding generation request individually, without batching, by calling REST endpoints separately for each request. This method could potentially lead to exceeding the maximum request per minute quota set by some providers. However, we are actively working to enhance this process by implementing request batching, which will allow multiple embedding requests to be combined into fewer API calls, thereby optimizing our use of provider resources and adhering to their request limits. This update is expected to be rolled out soon, eliminating the current limitation."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"id": "0f1199c1-f885-4290-b5e7-d1defd49abe1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Soalr\n",
|
||||
"# Solar\n",
|
||||
"\n",
|
||||
"[Solar](https://console.upstage.ai/services/embedding) offers an embeddings service.\n",
|
||||
"\n",
|
||||
|
||||
168
docs/docs/integrations/tools/databricks.ipynb
Normal file
@@ -0,0 +1,168 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Databricks Unity Catalog (UC)\n",
|
||||
"\n",
|
||||
"This notebook shows how to use UC functions as LangChain tools.\n",
|
||||
"\n",
|
||||
"See Databricks documentation ([AWS](https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-ddl-create-sql-function.html)|[Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/language-manual/sql-ref-syntax-ddl-create-sql-function)|[GCP](https://docs.gcp.databricks.com/en/sql/language-manual/sql-ref-syntax-ddl-create-sql-function.html)) to learn how to create SQL or Python functions in UC. Do not skip function and parameter comments, which are critical for LLMs to call functions properly.\n",
|
||||
"\n",
|
||||
"In this example notebook, we create a simple Python function that executes arbitary code and use it as a LangChain tool:\n",
|
||||
"\n",
|
||||
"```sql\n",
|
||||
"CREATE FUNCTION main.tools.python_exec (\n",
|
||||
" code STRING COMMENT 'Python code to execute. Remember to print the final result to stdout.'\n",
|
||||
")\n",
|
||||
"RETURNS STRING\n",
|
||||
"LANGUAGE PYTHON\n",
|
||||
"COMMENT 'Executes Python code and returns its stdout.'\n",
|
||||
"AS $$\n",
|
||||
" import sys\n",
|
||||
" from io import StringIO\n",
|
||||
" stdout = StringIO()\n",
|
||||
" sys.stdout = stdout\n",
|
||||
" exec(code)\n",
|
||||
" return stdout.getvalue()\n",
|
||||
"$$\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"It runs in a secure and isolated environment within a Databricks SQL warehouse."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet databricks-sdk langchain-community langchain-openai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.tools.databricks import UCFunctionToolkit\n",
|
||||
"\n",
|
||||
"tools = (\n",
|
||||
" UCFunctionToolkit(\n",
|
||||
" # You can find the SQL warehouse ID in its UI after creation.\n",
|
||||
" warehouse_id=\"xxxx123456789\"\n",
|
||||
" )\n",
|
||||
" .include(\n",
|
||||
" # Include functions as tools using their qualified names.\n",
|
||||
" # You can use \"{catalog_name}.{schema_name}.*\" to get all functions in a schema.\n",
|
||||
" \"main.tools.python_exec\",\n",
|
||||
" )\n",
|
||||
" .get_tools()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import AgentExecutor, create_tool_calling_agent\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant. Make sure to use tool for information.\",\n",
|
||||
" ),\n",
|
||||
" (\"placeholder\", \"{chat_history}\"),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" (\"placeholder\", \"{agent_scratchpad}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"agent = create_tool_calling_agent(llm, tools, prompt)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m\n",
|
||||
"Invoking: `main__tools__python_exec` with `{'code': 'print(36939 * 8922.4)'}`\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3m{\"format\": \"SCALAR\", \"value\": \"329584533.59999996\\n\", \"truncated\": false}\u001b[0m\u001b[32;1m\u001b[1;3mThe result of the multiplication 36939 * 8922.4 is 329,584,533.60.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'input': '36939 * 8922.4',\n",
|
||||
" 'output': 'The result of the multiplication 36939 * 8922.4 is 329,584,533.60.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)\n",
|
||||
"agent_executor.invoke({\"input\": \"36939 * 8922.4\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "llm",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -65,6 +65,17 @@
|
||||
"ExaSearchRetriever is a retriever that uses Exa Search to retrieve relevant documents."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
":::{.callout-note}\n",
|
||||
"\n",
|
||||
"The `max_characters` parameter for **TextContentsOptions** used to be called `max_length` which is now deprecated. Make sure to use `max_characters` instead.\n",
|
||||
"\n",
|
||||
":::"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
@@ -96,7 +107,7 @@
|
||||
"\n",
|
||||
"# retrieve 5 documents, with content truncated at 1000 characters\n",
|
||||
"retriever = ExaSearchRetriever(\n",
|
||||
" k=5, text_contents_options=TextContentsOptions(max_length=200)\n",
|
||||
" k=5, text_contents_options=TextContentsOptions(max_characters=200)\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"prompt = PromptTemplate.from_template(\n",
|
||||
|
||||
@@ -23,8 +23,6 @@
|
||||
"id": "d2d6ca14-fb7e-4172-9aa0-a3119a064b96",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n",
|
||||
"\n",
|
||||
"_Note: in addition to access to the database, an OpenAI API Key is required to run the full example._"
|
||||
]
|
||||
},
|
||||
@@ -51,7 +49,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pip install --upgrade langchain-astradb"
|
||||
"pip install -qU langchain-astradb"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -59,7 +57,7 @@
|
||||
"id": "2453d83a-bc8f-41e1-a692-befe4dd90156",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"_**Note.** the following are all packages required to run the full demo on this page. Depending on your LangChain setup, some of them may need to be installed:_"
|
||||
"_Make sure you have installed the packages required to run all of this demo:_"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -69,7 +67,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pip install langchain langchain-openai datasets pypdf"
|
||||
"pip install -qU langchain langchain-community langchain-openai datasets pypdf"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -90,9 +88,8 @@
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"from datasets import (\n",
|
||||
" load_dataset,\n",
|
||||
")\n",
|
||||
"from astrapy.info import CollectionVectorServiceOptions\n",
|
||||
"from datasets import load_dataset\n",
|
||||
"from langchain_community.document_loaders import PyPDFLoader\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
@@ -102,26 +99,6 @@
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1983f1da-0ae7-4a9b-bf4c-4ade328f7a3a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass(\"OPENAI_API_KEY = \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c656df06-e938-4bc5-b570-440b8b7a0189",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"embe = OpenAIEmbeddings()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "22866f09-e10d-4f05-a24b-b9420129462e",
|
||||
@@ -145,7 +122,7 @@
|
||||
"id": "68f61b01-3e09-47c1-9d67-5d6915c86626",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Connection parameters\n",
|
||||
"## DB Connection parameters\n",
|
||||
"\n",
|
||||
"These are found on your Astra DB dashboard:\n",
|
||||
"\n",
|
||||
@@ -173,7 +150,53 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "196268bd-a950-41c3-bede-f5b55f6a0804",
|
||||
"id": "84a1fe85-a42c-4f15-92e1-f79f1dd43ea2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create the vector store\n",
|
||||
"\n",
|
||||
"There are two ways to create an Astra DB vector store, which differ in how the embeddings are computed.\n",
|
||||
"\n",
|
||||
"*Explicit embeddings*. You can separately instantiate a `langchain_core.embeddings.Embeddings` class and pass it to the `AstraDBVectorStore` constructor, just like with most other LangChain vector stores.\n",
|
||||
"\n",
|
||||
"*Integrated embedding computation*. Alternatively, you can use the [Vectorize](https://www.datastax.com/blog/simplifying-vector-embedding-generation-with-astra-vectorize) feature of Astra DB and simply specify the name of a supported embedding model when creating the store. The embedding computations are entirely handled within the database. (To proceed with this method, you must have enabled the desired embedding integration for your database, as described [in the docs](https://docs.datastax.com/en/astra-db-serverless/databases/embedding-generation.html).)\n",
|
||||
"\n",
|
||||
"**Please choose one method and run the corresponding cells only.**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8c435386-e8d5-41f4-a9e5-7b609ef781f9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Method 1: provide embeddings explicitly\n",
|
||||
"\n",
|
||||
"This demo will use an OpenAI embedding model:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "dfa5c005-9738-4c53-b8a8-8540fcbb8bad",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass(\"OPENAI_API_KEY = \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3accae6f-73e2-483a-83f7-76eb33558a1f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"my_embeddings = OpenAIEmbeddings()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "465b1b16-5363-4c4f-9917-a49e02a86c14",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now you can create the vector store:"
|
||||
@@ -187,7 +210,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"vstore = AstraDBVectorStore(\n",
|
||||
" embedding=embe,\n",
|
||||
" embedding=my_embeddings,\n",
|
||||
" collection_name=\"astra_vector_demo\",\n",
|
||||
" api_endpoint=ASTRA_DB_API_ENDPOINT,\n",
|
||||
" token=ASTRA_DB_APPLICATION_TOKEN,\n",
|
||||
@@ -195,6 +218,46 @@
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5d5d2bfa-c071-4a5b-8b6e-3daa1b6de164",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Method 2: use Astra Vectorize (embeddings integrated in Astra DB)\n",
|
||||
"\n",
|
||||
"Here it is assumed that you have\n",
|
||||
"\n",
|
||||
"- enabled the OpenAI integration in your Astra DB organization,\n",
|
||||
"- added an API Key named `\"MY_OPENAI_API_KEY\"` to the integration, and\n",
|
||||
"- scoped it to the database you are using.\n",
|
||||
"\n",
|
||||
"For more details please consult the [documentation](https://docs.datastax.com/en/astra-db-serverless/integrations/embedding-providers/openai.html)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9d18455d-3fa6-4f9e-b687-3a2bc71c9a23",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"openai_vectorize_options = CollectionVectorServiceOptions(\n",
|
||||
" provider=\"openai\",\n",
|
||||
" model_name=\"text-embedding-3-small\",\n",
|
||||
" authentication={\n",
|
||||
" \"providerKey\": \"MY_OPENAI_API_KEY.providerKey\",\n",
|
||||
" },\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"vstore = AstraDBVectorStore(\n",
|
||||
" collection_name=\"astra_vectorize_demo\",\n",
|
||||
" api_endpoint=ASTRA_DB_API_ENDPOINT,\n",
|
||||
" token=ASTRA_DB_APPLICATION_TOKEN,\n",
|
||||
" namespace=ASTRA_DB_KEYSPACE,\n",
|
||||
" collection_vector_service_options=openai_vectorize_options,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
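With Vectorize, the embedding computation happens inside Astra DB, so the usual vector-store calls are used unchanged. A brief sketch (with hypothetical sample text, not from the original notebook) of adding a document and querying the store created above:

```python
# Add a document and run a similarity search; embeddings are computed
# server-side by the configured Vectorize integration.
from langchain_core.documents import Document

vstore.add_documents(
    [Document(page_content="Astra Vectorize computes embeddings in the database.")]
)
hits = vstore.similarity_search("Where are the embeddings computed?", k=1)
print(hits[0].page_content)
```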
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9a348678-b2f6-46ca-9a0d-2eb4cc6b66b1",
|
||||
@@ -334,7 +397,9 @@
|
||||
"id": "b14ea558-bfbe-41ce-807e-d70670060ada",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### MMR (Maximal-marginal-relevance) search"
|
||||
"### MMR (Maximal-marginal-relevance) search\n",
|
||||
"\n",
|
||||
"_Note: the MMR search method is not (yet) supported for vector stores built with Astra Vectorize._"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -537,7 +602,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.18"
|
||||
"version": "3.11.2"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -30,7 +30,7 @@
|
||||
"- `.peek`\n",
|
||||
"- and `.query` runs the similarity search.\n",
|
||||
"\n",
|
||||
"View full docs at [docs](https://docs.trychroma.com/reference/Collection). To access these methods directly, you can do `._collection.method()`\n"
|
||||
"View full docs at [docs](https://docs.trychroma.com/reference/py-collection). To access these methods directly, you can do `._collection.method()`\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -23,7 +23,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet pymilvus"
|
||||
"%pip install --upgrade --quiet langchain_milvus"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -67,7 +67,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import TextLoader\n",
|
||||
"from langchain_community.vectorstores import Milvus\n",
|
||||
"from langchain_milvus.vectorstores import Milvus\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"from langchain_text_splitters import CharacterTextSplitter"
|
||||
]
|
||||
|
||||
@@ -584,9 +584,9 @@
|
||||
"You are an expert in state of the union topics. You are provided multiple context items that are related to the prompt you have to answer.\n",
|
||||
"Use the following pieces of context to answer the question at the end.\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"'''\n",
|
||||
"{context}\n",
|
||||
"```\n",
|
||||
"'''\n",
|
||||
"\n",
|
||||
"Question: {question}\n",
|
||||
"\"\"\"\n",
|
||||
|
||||
@@ -2,22 +2,20 @@
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "683953b3",
|
||||
"id": "559f8e0e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Vectara\n",
|
||||
"\n",
|
||||
">[Vectara](https://vectara.com/) is the trusted GenAI platform that provides an easy-to-use API for document indexing and querying. \n",
|
||||
"[Vectara](https://vectara.com/) provides a Trusted Generative AI platform, allowing organizations to rapidly create a ChatGPT-like experience (an AI assistant) which is grounded in the data, documents, and knowledge that they have (technically, it is Retrieval-Augmented-Generation-as-a-service). \n",
|
||||
"\n",
|
||||
"Vectara provides an end-to-end managed service for Retrieval Augmented Generation or [RAG](https://vectara.com/grounded-generation/), which includes:\n",
|
||||
"\n",
|
||||
"1. A way to extract text from document files and chunk them into sentences.\n",
|
||||
"\n",
|
||||
"2. The state-of-the-art [Boomerang](https://vectara.com/how-boomerang-takes-retrieval-augmented-generation-to-the-next-level-via-grounded-generation/) embeddings model. Each text chunk is encoded into a vector embedding using Boomerang, and stored in the Vectara internal knowledge (vector+text) store\n",
|
||||
"\n",
|
||||
"3. A query service that automatically encodes the query into embedding, and retrieves the most relevant text segments (including support for [Hybrid Search](https://docs.vectara.com/docs/api-reference/search-apis/lexical-matching) and [MMR](https://vectara.com/get-diverse-results-and-comprehensive-summaries-with-vectaras-mmr-reranker/))\n",
|
||||
"\n",
|
||||
"4. An option to create [generative summary](https://docs.vectara.com/docs/learn/grounded-generation/grounded-generation-overview), based on the retrieved documents, including citations.\n",
|
||||
"Vectara serverless RAG-as-a-service provides all the components of RAG behind an easy-to-use API, including:\n",
|
||||
"1. A way to extract text from files (PDF, PPT, DOCX, etc)\n",
|
||||
"2. ML-based chunking that provides state of the art performance.\n",
|
||||
"3. The [Boomerang](https://vectara.com/how-boomerang-takes-retrieval-augmented-generation-to-the-next-level-via-grounded-generation/) embeddings model.\n",
|
||||
"4. Its own internal vector database where text chunks and embedding vectors are stored.\n",
|
||||
"5. A query service that automatically encodes the query into embedding, and retrieves the most relevant text segments (including support for [Hybrid Search](https://docs.vectara.com/docs/api-reference/search-apis/lexical-matching) and [MMR](https://vectara.com/get-diverse-results-and-comprehensive-summaries-with-vectaras-mmr-reranker/))\n",
|
||||
"7. An LLM to for creating a [generative summary](https://docs.vectara.com/docs/learn/grounded-generation/grounded-generation-overview), based on the retrieved documents (context), including citations.\n",
|
||||
"\n",
|
||||
"See the [Vectara API documentation](https://docs.vectara.com/docs/) for more information on how to use the API.\n",
|
||||
"\n",
|
||||
@@ -28,25 +26,22 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "dc0f4344",
|
||||
"id": "e97dcf11",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Setup\n",
|
||||
"\n",
|
||||
"You will need a Vectara account to use Vectara with LangChain. To get started, use the following steps:\n",
|
||||
"\n",
|
||||
"1. [Sign up](https://www.vectara.com/integrations/langchain) for a Vectara account if you don't already have one. Once you have completed your sign up you will have a Vectara customer ID. You can find your customer ID by clicking on your name, on the top-right of the Vectara console window.\n",
|
||||
"# Getting Started\n",
|
||||
"\n",
|
||||
"To get started, use the following steps:\n",
|
||||
"1. If you don't already have one, [Sign up](https://www.vectara.com/integrations/langchain) for your free Vectara account. Once you have completed your sign up you will have a Vectara customer ID. You can find your customer ID by clicking on your name, on the top-right of the Vectara console window.\n",
|
||||
"2. Within your account you can create one or more corpora. Each corpus represents an area that stores text data upon ingest from input documents. To create a corpus, use the **\"Create Corpus\"** button. You then provide a name to your corpus as well as a description. Optionally you can define filtering attributes and apply some advanced options. If you click on your created corpus, you can see its name and corpus ID right on the top.\n",
|
||||
"3. Next you'll need to create API keys to access the corpus. Click on the **\"Access Control\"** tab in the corpus view and then the **\"Create API Key\"** button. Give your key a name, and choose whether you want query-only or query+index for your key. Click \"Create\" and you now have an active API key. Keep this key confidential. \n",
|
||||
"\n",
|
||||
"3. Next you'll need to create API keys to access the corpus. Click on the **\"Authorization\"** tab in the corpus view and then the **\"Create API Key\"** button. Give your key a name, and choose whether you want query only or query+index for your key. Click \"Create\" and you now have an active API key. Keep this key confidential. \n",
|
||||
"\n",
|
||||
"To use LangChain with Vectara, you'll need to have these three values: customer ID, corpus ID and api_key.\n",
|
||||
"To use LangChain with Vectara, you'll need to have these three values: `customer ID`, `corpus ID` and `api_key`.\n",
|
||||
"You can provide those to LangChain in two ways:\n",
|
||||
"\n",
|
||||
"1. Include in your environment these three variables: `VECTARA_CUSTOMER_ID`, `VECTARA_CORPUS_ID` and `VECTARA_API_KEY`.\n",
|
||||
"\n",
|
||||
"> For example, you can set these variables using os.environ and getpass as follows:\n",
|
||||
" For example, you can set these variables using os.environ and getpass as follows:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"import os\n",
|
||||
@@ -57,39 +52,50 @@
|
||||
"os.environ[\"VECTARA_API_KEY\"] = getpass.getpass(\"Vectara API Key:\")\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"2. Add them to the Vectara vectorstore constructor:\n",
|
||||
"2. Add them to the `Vectara` vectorstore constructor:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"vectorstore = Vectara(\n",
|
||||
"vectara = Vectara(\n",
|
||||
" vectara_customer_id=vectara_customer_id,\n",
|
||||
" vectara_corpus_id=vectara_corpus_id,\n",
|
||||
" vectara_api_key=vectara_api_key\n",
|
||||
" )\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "eeead681",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Connecting to Vectara from LangChain\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"To get started, let's ingest the documents using the from_documents() method.\n",
|
||||
"We assume here that you've added your VECTARA_CUSTOMER_ID, VECTARA_CORPUS_ID and query+indexing VECTARA_API_KEY as environment variables."
|
||||
"In this notebook we assume they are provided in the environment."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "04a1f1a0",
|
||||
"id": "aac7a9a6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import TextLoader\n",
|
||||
"from langchain_community.embeddings.fake import FakeEmbeddings\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"VECTARA_API_KEY\"] = \"<YOUR_VECTARA_API_KEY>\"\n",
|
||||
"os.environ[\"VECTARA_CORPUS_ID\"] = \"<YOUR_VECTARA_CORPUS_ID>\"\n",
|
||||
"os.environ[\"VECTARA_CUSTOMER_ID\"] = \"<YOUR_VECTARA_CUSTOMER_ID>\"\n",
|
||||
"\n",
|
||||
"from langchain_community.vectorstores import Vectara\n",
|
||||
"from langchain_text_splitters import CharacterTextSplitter"
|
||||
"from langchain_community.vectorstores.vectara import (\n",
|
||||
" RerankConfig,\n",
|
||||
" SummaryConfig,\n",
|
||||
" VectaraQueryConfig,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "875ffb7e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"First we load the state-of-the-union text into Vectara. \n",
|
||||
"\n",
|
||||
"Note that we use the `from_files` interface which does not require any local processing or chunking - Vectara receives the file content and performs all the necessary pre-processing, chunking and embedding of the file into its knowledge store.\n",
|
||||
"\n",
|
||||
"In this case it uses a `.txt` file but the same works for many other [file types](https://docs.vectara.com/docs/api-reference/indexing-apis/file-upload/file-upload-filetypes)."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -99,132 +105,154 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = TextLoader(\"state_of_the_union.txt\")\n",
|
||||
"documents = loader.load()\n",
|
||||
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
|
||||
"docs = text_splitter.split_documents(documents)"
|
||||
"vectara = Vectara.from_files([\"state_of_the_union.txt\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "22a6b953",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Basic Vectara RAG (retrieval augmented generation)\n",
|
||||
"\n",
|
||||
"We now create a `VectaraQueryConfig` object to control the retrieval and summarization options:\n",
|
||||
"* We enable summarization, specifying we would like the LLM to pick the top 7 matching chunks and respond in English\n",
|
||||
"* We enable MMR (max marginal relevance) in the retrieval process, with a 0.2 diversity bias factor\n",
|
||||
"* We want the top-10 results, with hybrid search configured with a value of 0.025\n",
|
||||
"\n",
|
||||
"Using this configuration, let's create a LangChain `Runnable` object that encpasulates the full Vectara RAG pipeline, using the `as_rag` method:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "8429667e",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-04-04T10:51:22.525091Z",
|
||||
"start_time": "2023-04-04T10:51:22.522015Z"
|
||||
},
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"vectara = Vectara.from_documents(\n",
|
||||
" docs,\n",
|
||||
" embedding=FakeEmbeddings(size=768),\n",
|
||||
" doc_metadata={\"speech\": \"state-of-the-union\"},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "90dbf3e7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Vectara's indexing API provides a file upload API where the file is handled directly by Vectara - pre-processed, chunked optimally and added to the Vectara vector store.\n",
|
||||
"To use this, we added the add_files() method (as well as from_files()). \n",
|
||||
"\n",
|
||||
"Let's see this in action. We pick two PDF documents to upload: \n",
|
||||
"\n",
|
||||
"1. The \"I have a dream\" speech by Dr. King\n",
|
||||
"2. Churchill's \"We Shall Fight on the Beaches\" speech"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "85ef3468",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import tempfile\n",
|
||||
"import urllib.request\n",
|
||||
"\n",
|
||||
"urls = [\n",
|
||||
" [\n",
|
||||
" \"https://www.gilderlehrman.org/sites/default/files/inline-pdfs/king.dreamspeech.excerpts.pdf\",\n",
|
||||
" \"I-have-a-dream\",\n",
|
||||
" ],\n",
|
||||
" [\n",
|
||||
" \"https://www.parkwayschools.net/cms/lib/MO01931486/Centricity/Domain/1578/Churchill_Beaches_Speech.pdf\",\n",
|
||||
" \"we shall fight on the beaches\",\n",
|
||||
" ],\n",
|
||||
"]\n",
|
||||
"files_list = []\n",
|
||||
"for url, _ in urls:\n",
|
||||
" name = tempfile.NamedTemporaryFile().name\n",
|
||||
" urllib.request.urlretrieve(url, name)\n",
|
||||
" files_list.append(name)\n",
|
||||
"\n",
|
||||
"docsearch: Vectara = Vectara.from_files(\n",
|
||||
" files=files_list,\n",
|
||||
" embedding=FakeEmbeddings(size=768),\n",
|
||||
" metadatas=[{\"url\": url, \"speech\": title} for url, title in urls],\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1f9215c8",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-04-04T09:27:29.920258Z",
|
||||
"start_time": "2023-04-04T09:27:29.913714Z"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"## Similarity search\n",
|
||||
"\n",
|
||||
"The simplest scenario for using Vectara is to perform a similarity search. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "a8c513ab",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-04-04T10:51:25.204469Z",
|
||||
"start_time": "2023-04-04T10:51:24.855618Z"
|
||||
},
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"found_docs = vectara.similarity_search(\n",
|
||||
" query, n_sentence_context=0, filter=\"doc.speech = 'state-of-the-union'\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "53324492",
|
||||
"id": "9ecda054-96a8-4a91-aeae-32006efb1ac8",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson.', metadata={'source': 'langchain', 'lang': 'eng', 'offset': '596', 'len': '97', 'speech': 'state-of-the-union'}),\n",
|
||||
" Document(page_content='In this struggle as President Zelenskyy said in his speech to the European Parliament “Light will win over darkness.”', metadata={'source': 'langchain', 'lang': 'eng', 'offset': '141', 'len': '117', 'speech': 'state-of-the-union'}),\n",
|
||||
" Document(page_content='As Ohio Senator Sherrod Brown says, “It’s time to bury the label “Rust Belt.”', metadata={'source': 'langchain', 'lang': 'eng', 'offset': '0', 'len': '77', 'speech': 'state-of-the-union'}),\n",
|
||||
" Document(page_content='Last month, I announced our plan to supercharge \\nthe Cancer Moonshot that President Obama asked me to lead six years ago.', metadata={'source': 'langchain', 'lang': 'eng', 'offset': '0', 'len': '122', 'speech': 'state-of-the-union'}),\n",
|
||||
" Document(page_content='He thought he could roll into Ukraine and the world would roll over.', metadata={'source': 'langchain', 'lang': 'eng', 'offset': '664', 'len': '68', 'speech': 'state-of-the-union'}),\n",
|
||||
" Document(page_content='That’s why one of the first things I did as President was fight to pass the American Rescue Plan.', metadata={'source': 'langchain', 'lang': 'eng', 'offset': '314', 'len': '97', 'speech': 'state-of-the-union'}),\n",
|
||||
" Document(page_content='And he thought he could divide us at home.', metadata={'source': 'langchain', 'lang': 'eng', 'offset': '160', 'len': '42', 'speech': 'state-of-the-union'}),\n",
|
||||
" Document(page_content='He met the Ukrainian people.', metadata={'source': 'langchain', 'lang': 'eng', 'offset': '788', 'len': '28', 'speech': 'state-of-the-union'}),\n",
|
||||
" Document(page_content='He thought the West and NATO wouldn’t respond.', metadata={'source': 'langchain', 'lang': 'eng', 'offset': '113', 'len': '46', 'speech': 'state-of-the-union'}),\n",
|
||||
" Document(page_content='In this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things.', metadata={'source': 'langchain', 'lang': 'eng', 'offset': '772', 'len': '131', 'speech': 'state-of-the-union'})]"
|
||||
"\"Biden addressed various topics in his statements. He highlighted the need to confront Putin by building a coalition of nations[1]. He also expressed commitment to investigating the impact of burn pits on soldiers' health, including his son's case[2]. Additionally, Biden outlined a plan to fight inflation by cutting prescription drug costs[3]. He emphasized the importance of continuing to combat COVID-19 and not just accepting living with it[4]. Furthermore, he discussed measures to weaken Russia economically and target Russian oligarchs[6]. Biden also advocated for passing the Equality Act to support LGBTQ+ Americans and condemned state laws targeting transgender individuals[7].\""
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"summary_config = SummaryConfig(is_enabled=True, max_results=7, response_lang=\"eng\")\n",
|
||||
"rerank_config = RerankConfig(reranker=\"mmr\", rerank_k=50, mmr_diversity_bias=0.2)\n",
|
||||
"config = VectaraQueryConfig(\n",
|
||||
" k=10, lambda_val=0.005, rerank_config=rerank_config, summary_config=summary_config\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"query_str = \"what did Biden say?\"\n",
|
||||
"\n",
|
||||
"rag = vectara.as_rag(config)\n",
|
||||
"rag.invoke(query_str)[\"answer\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "cd825d63-93a0-4e45-a455-bfabb01ee1a1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also use the streaming interface like this:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "27f01330-8917-4eff-b603-59ab2571a4d2",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Biden addressed various topics in his statements. He highlighted the importance of building coalitions to confront global challenges [1]. He also expressed commitment to investigating the impact of burn pits on soldiers' health, including his son's case [2, 4]. Additionally, Biden outlined his plan to combat inflation by cutting prescription drug costs and reducing the deficit, with support from Nobel laureates and business leaders [3]. He emphasized the ongoing fight against COVID-19 and the need to continue combating the virus [5]. Furthermore, Biden discussed measures taken to weaken Russia's economic and military strength, targeting Russian oligarchs and corrupt leaders [6]. He also advocated for passing the Equality Act to support LGBTQ+ Americans and address discriminatory state laws [7]."
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"output = {}\n",
|
||||
"curr_key = None\n",
|
||||
"for chunk in rag.stream(query_str):\n",
|
||||
" for key in chunk:\n",
|
||||
" if key not in output:\n",
|
||||
" output[key] = chunk[key]\n",
|
||||
" else:\n",
|
||||
" output[key] += chunk[key]\n",
|
||||
" if key == \"answer\":\n",
|
||||
" print(chunk[key], end=\"\", flush=True)\n",
|
||||
" curr_key = key"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7eaf871d-eba2-46b1-bfa3-b9c82947d2be",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Hallucination detection and Factual Consistency Score\n",
|
||||
"\n",
|
||||
"Vectara created [HHEM](https://huggingface.co/vectara/hallucination_evaluation_model) - an open source model that can be used to evaluate RAG responses for factual consistency. \n",
|
||||
"\n",
|
||||
"As part of the Vectara RAG, the \"Factual Consistency Score\" (or FCS), which is an improved version of the open source HHEM is made available via the API. This is automatically included in the output of the RAG pipeline"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "b2e0aa2c-7c8e-4d79-8abc-66f5a1f961b3",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Biden addressed various topics in his statements. He highlighted the need to confront Putin by building a coalition of nations[1]. He also expressed his commitment to investigating the impact of burn pits on soldiers' health, referencing his son's experience[2]. Additionally, Biden discussed his plan to fight inflation by cutting prescription drug costs and garnering support from Nobel laureates and business leaders[4]. Furthermore, he emphasized the importance of continuing to combat COVID-19 and not merely accepting living with the virus[5]. Biden's remarks encompassed international relations, healthcare challenges faced by soldiers, economic strategies, and the ongoing battle against the pandemic.\n",
|
||||
"Vectara FCS = 0.41796625\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"summary_config = SummaryConfig(is_enabled=True, max_results=5, response_lang=\"eng\")\n",
|
||||
"rerank_config = RerankConfig(reranker=\"mmr\", rerank_k=50, mmr_diversity_bias=0.1)\n",
|
||||
"config = VectaraQueryConfig(\n",
|
||||
" k=10, lambda_val=0.005, rerank_config=rerank_config, summary_config=summary_config\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"rag = vectara.as_rag(config)\n",
|
||||
"resp = rag.invoke(query_str)\n",
|
||||
"print(resp[\"answer\"])\n",
|
||||
"print(f\"Vectara FCS = {resp['fcs']}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b651396a-5726-4d49-bacf-c9d7a5ddcf7a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Vectara as a langchain retreiver\n",
|
||||
"\n",
|
||||
"The Vectara component can also be used just as a retriever. \n",
|
||||
"\n",
|
||||
"In this case, it behaves just like any other LangChain retriever. The main use of this mode is for semantic search, and in this case we disable summarization:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "19cd2f86",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='He thought the West and NATO wouldn’t respond. And he thought he could divide us at home. We were ready. Here is what we did. We prepared extensively and carefully. We spent months building a coalition of other freedom-loving nations from Europe and the Americas to Asia and Africa to confront Putin.', metadata={'lang': 'eng', 'section': '1', 'offset': '2160', 'len': '36', 'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'vectara'}),\n",
|
||||
" Document(page_content='When they came home, many of the world’s fittest and best trained warriors were never the same. Dizziness. \\n\\nA cancer that would put them in a flag-draped coffin. I know. \\n\\nOne of those soldiers was my son Major Beau Biden. We don’t know for sure if a burn pit was the cause of his brain cancer, or the diseases of so many of our troops. But I’m committed to finding out everything we can.', metadata={'lang': 'eng', 'section': '1', 'offset': '34652', 'len': '60', 'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'vectara'}),\n",
|
||||
" Document(page_content='But cancer from prolonged exposure to burn pits ravaged Heath’s lungs and body. Danielle says Heath was a fighter to the very end. He didn’t know how to stop fighting, and neither did she. Through her pain she found purpose to demand we do better. Tonight, Danielle—we are.', metadata={'lang': 'eng', 'section': '1', 'offset': '35442', 'len': '57', 'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'vectara'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
@@ -233,304 +261,95 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"found_docs"
|
||||
"config.summary_config.is_enabled = False\n",
|
||||
"config.k = 3\n",
|
||||
"retriever = vectara.as_retriever(config=config)\n",
|
||||
"retriever.invoke(query_str)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c49284ed",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For backwards compatibility, you can also enable summarization with a retriever, in which case the summary is added as an additional Document object:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "fc516993",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-04-04T10:51:25.220984Z",
|
||||
"start_time": "2023-04-04T10:51:25.213943Z"
|
||||
},
|
||||
"tags": []
|
||||
},
|
||||
"id": "59268e9a-6089-4bb2-8c61-1ea6b956f83c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson.\n"
|
||||
]
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='He thought the West and NATO wouldn’t respond. And he thought he could divide us at home. We were ready. Here is what we did. We prepared extensively and carefully. We spent months building a coalition of other freedom-loving nations from Europe and the Americas to Asia and Africa to confront Putin.', metadata={'lang': 'eng', 'section': '1', 'offset': '2160', 'len': '36', 'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'vectara'}),\n",
|
||||
" Document(page_content='When they came home, many of the world’s fittest and best trained warriors were never the same. Dizziness. \\n\\nA cancer that would put them in a flag-draped coffin. I know. \\n\\nOne of those soldiers was my son Major Beau Biden. We don’t know for sure if a burn pit was the cause of his brain cancer, or the diseases of so many of our troops. But I’m committed to finding out everything we can.', metadata={'lang': 'eng', 'section': '1', 'offset': '34652', 'len': '60', 'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'vectara'}),\n",
|
||||
" Document(page_content='But cancer from prolonged exposure to burn pits ravaged Heath’s lungs and body. Danielle says Heath was a fighter to the very end. He didn’t know how to stop fighting, and neither did she. Through her pain she found purpose to demand we do better. Tonight, Danielle—we are.', metadata={'lang': 'eng', 'section': '1', 'offset': '35442', 'len': '57', 'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'vectara'}),\n",
|
||||
" Document(page_content=\"Biden discussed various topics in his statements. He highlighted the importance of unity and preparation to confront challenges, such as building coalitions to address global issues [1]. Additionally, he shared personal stories about the impact of health issues on soldiers, including his son's experience with brain cancer possibly linked to burn pits [2]. Biden also outlined his plans to combat inflation by cutting prescription drug costs and emphasized the ongoing efforts to combat COVID-19, rejecting the idea of merely living with the virus [4, 5]. Overall, Biden's messages revolved around unity, healthcare challenges faced by soldiers, economic plans, and the ongoing fight against COVID-19.\", metadata={'summary': True, 'fcs': 0.54751414})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(found_docs[0].page_content)"
|
||||
"config.summary_config.is_enabled = True\n",
|
||||
"config.k = 3\n",
|
||||
"retriever = vectara.as_retriever(config=config)\n",
|
||||
"retriever.invoke(query_str)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1bda9bf5",
|
||||
"id": "8f16bf8d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Similarity search with score\n",
|
||||
"## Advanced LangChain query pre-processing with Vectara\n",
|
||||
"\n",
|
||||
"Sometimes we might want to perform the search, but also obtain a relevancy score to know how good is a particular result."
|
||||
"Vectara's \"RAG as a service\" does a lot of the heavy lifting in creating question answering or chatbot chains. The integration with LangChain provides the option to use additional capabilities such as query pre-processing like `SelfQueryRetriever` or `MultiQueryRetriever`. Let's look at an example of using the [MultiQueryRetriever](https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever).\n",
|
||||
"\n",
|
||||
"Since MQR uses an LLM we have to set that up - here we choose `ChatOpenAI`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "8804a21d",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-04-04T10:51:25.631585Z",
|
||||
"start_time": "2023-04-04T10:51:25.227384Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"found_docs = vectara.similarity_search_with_score(\n",
|
||||
" query,\n",
|
||||
" filter=\"doc.speech = 'state-of-the-union'\",\n",
|
||||
" score_threshold=0.2,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "756a6887",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-04-04T10:51:25.642282Z",
|
||||
"start_time": "2023-04-04T10:51:25.635947Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Justice Breyer, thank you for your service. One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. A former top litigator in private practice.\n",
|
||||
"\n",
|
||||
"Score: 0.74179757\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"document, score = found_docs[0]\n",
|
||||
"print(document.page_content)\n",
|
||||
"print(f\"\\nScore: {score}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1f9876a8",
|
||||
"id": "e14325b9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now let's do similar search for content in the files we uploaded"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "47784de5",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"With this threshold of 1.2 we have 0 documents\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = \"We must forever conduct our struggle\"\n",
|
||||
"min_score = 1.2\n",
|
||||
"found_docs = vectara.similarity_search_with_score(\n",
|
||||
" query,\n",
|
||||
" filter=\"doc.speech = 'I-have-a-dream'\",\n",
|
||||
" score_threshold=min_score,\n",
|
||||
")\n",
|
||||
"print(f\"With this threshold of {min_score} we have {len(found_docs)} documents\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "29f465e5",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"With this threshold of 0.2 we have 10 documents\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = \"We must forever conduct our struggle\"\n",
|
||||
"min_score = 0.2\n",
|
||||
"found_docs = vectara.similarity_search_with_score(\n",
|
||||
" query,\n",
|
||||
" filter=\"doc.speech = 'I-have-a-dream'\",\n",
|
||||
" score_threshold=min_score,\n",
|
||||
")\n",
|
||||
"print(f\"With this threshold of {min_score} we have {len(found_docs)} documents\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "471112c0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"MMR is an important retrieval capability for many applications, whereby search results feeding your GenAI application are reranked to improve diversity of results. \n",
|
||||
"\n",
|
||||
"Let's see how that works with Vectara:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "5d597e91",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Economic assistance.\n",
|
||||
"\n",
|
||||
"Grow the workforce. Build the economy from the bottom up \n",
|
||||
"and the middle out, not from the top down.\n",
|
||||
"\n",
|
||||
"When we invest in our workers, when we build the economy from the bottom up and the middle out together, we can do something we haven’t done in a long time: build a better America.\n",
|
||||
"\n",
|
||||
"Our economy grew at a rate of 5.7% last year, the strongest growth in nearly 40 years, the first step in bringing fundamental change to an economy that hasn’t worked for the working people of this nation for too long.\n",
|
||||
"\n",
|
||||
"Economists call it “increasing the productive capacity of our economy.”\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = \"state of the economy\"\n",
|
||||
"found_docs = vectara.similarity_search(\n",
|
||||
" query,\n",
|
||||
" n_sentence_context=0,\n",
|
||||
" filter=\"doc.speech = 'state-of-the-union'\",\n",
|
||||
" k=5,\n",
|
||||
" mmr_config={\"is_enabled\": True, \"mmr_k\": 50, \"diversity_bias\": 0.0},\n",
|
||||
")\n",
|
||||
"print(\"\\n\\n\".join([x.page_content for x in found_docs]))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "be2b2326",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Economic assistance.\n",
|
||||
"\n",
|
||||
"The Russian stock market has lost 40% of its value and trading remains suspended.\n",
|
||||
"\n",
|
||||
"But that trickle-down theory led to weaker economic growth, lower wages, bigger deficits, and the widest gap between those at the top and everyone else in nearly a century.\n",
|
||||
"\n",
|
||||
"In state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections.\n",
|
||||
"\n",
|
||||
"The federal government spends about $600 Billion a year to keep the country safe and secure.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = \"state of the economy\"\n",
|
||||
"found_docs = vectara.similarity_search(\n",
|
||||
" query,\n",
|
||||
" n_sentence_context=0,\n",
|
||||
" filter=\"doc.speech = 'state-of-the-union'\",\n",
|
||||
" k=5,\n",
|
||||
" mmr_config={\"is_enabled\": True, \"mmr_k\": 50, \"diversity_bias\": 1.0},\n",
|
||||
")\n",
|
||||
"print(\"\\n\\n\".join([x.page_content for x in found_docs]))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "10c1427e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"As you can see, in the first example diversity_bias was set to 0.0 (equivalent to diversity reranking disabled), which resulted in a the top-5 most relevant documents. With diversity_bias=1.0 we maximize diversity and as you can see the resulting top documents are much more diverse in their semantic meanings."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "691a82d6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Vectara as a Retriever\n",
|
||||
"\n",
|
||||
"Finally let's see how to use Vectara with the `as_retriever()` interface:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "9427195f",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-04-04T10:51:26.031451Z",
|
||||
"start_time": "2023-04-04T10:51:26.018763Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"VectorStoreRetriever(tags=['Vectara'], vectorstore=<langchain_community.vectorstores.vectara.Vectara object at 0x109a3c760>)"
|
||||
"\"Biden's statement highlighted his efforts to unite freedom-loving nations against Putin's aggression, sharing information in advance to counter Russian lies and hold Putin accountable[1]. Additionally, he emphasized his commitment to military families, like Danielle Robinson, and outlined plans for more affordable housing, Pre-K for 3- and 4-year-olds, and ensuring no additional taxes for those earning less than $400,000 a year[2][3]. The statement also touched on the readiness of the West and NATO to respond to Putin's actions, showcasing extensive preparation and coalition-building efforts[4]. Heath Robinson's story, a combat medic who succumbed to cancer from burn pits, was used to illustrate the resilience and fight for better conditions[5].\""
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"retriever = vectara.as_retriever()\n",
|
||||
"retriever"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "f3c70c31",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-04-04T10:51:26.495652Z",
|
||||
"start_time": "2023-04-04T10:51:26.046407Z"
|
||||
},
|
||||
"scrolled": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Document(page_content='Justice Breyer, thank you for your service. One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. A former top litigator in private practice.', metadata={'source': 'langchain', 'lang': 'eng', 'offset': '596', 'len': '97', 'speech': 'state-of-the-union'})"
|
||||
]
|
||||
},
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"retriever.invoke(query)[0]"
|
||||
"from langchain.retrievers.multi_query import MultiQueryRetriever\n",
|
||||
"from langchain_openai.chat_models import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(temperature=0)\n",
|
||||
"mqr = MultiQueryRetriever.from_llm(retriever=retriever, llm=llm)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def get_summary(documents):\n",
|
||||
" return documents[-1].page_content\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"(mqr | get_summary).invoke(query_str)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2300e785",
|
||||
"id": "8060a423-b291-4166-8fd7-ba0e01692b51",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
@@ -552,7 +371,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.9"
|
||||
"version": "3.11.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -3,66 +3,132 @@
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "17546ebb",
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "raw"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_position: 4\n",
|
||||
"keywords: [agent, agents]\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1df78a71",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Build an Agent\n",
|
||||
"\n",
|
||||
"By themselves, language models can't take actions - they just output text.\n",
|
||||
"A big use case for LangChain is creating **agents**.\n",
|
||||
"Agents are systems that use LLMs as reasoning engines to determine which actions to take and the inputs to pass them.\n",
|
||||
"After executing actions, the results can be fed back into the LLM to determine whether more actions are needed, or whether it is okay to finish.\n",
|
||||
"\n",
|
||||
"In this tutorial we will build an agent that can interact with a search engine. You will be able to ask this agent questions, watch it call the search tool, and have conversations with it.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Concepts\n",
|
||||
"\n",
|
||||
"In following this tutorial, you will learn how to:\n",
|
||||
"\n",
|
||||
"- Use [language models](/docs/concepts/#chat-models), in particular their tool calling ability\n",
|
||||
"- Use a Search [Tool](/docs/concepts/#tools) to look up information from the Internet\n",
|
||||
"- Compose a [LangGraph Agent](/docs/concepts/#agents), which use an LLM to determine actions and then execute them\n",
|
||||
"- Debug and trace your application using [LangSmith](/docs/concepts/#langsmith)\n",
|
||||
"\n",
|
||||
"## End-to-end agent\n",
|
||||
"\n",
|
||||
"The code snippet below represents a fully functional agent that uses an LLM to decide which tools to use. It is equipped with a generic search tool. It has conversational memory - meaning that it can be used as a multi-turn chatbot.\n",
|
||||
"\n",
|
||||
"In the rest of the guide, we will walk through the individual components and what each part does - but if you want to just grab some code and get started, feel free to use this!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "a79bb782",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'agent': {'messages': [AIMessage(content=\"Hello Bob! Since you didn't ask a specific question, I don't need to use any tools to respond. It's nice to meet you. San Francisco is a wonderful city with lots to see and do. I hope you're enjoying living there. Please let me know if you have any other questions!\", response_metadata={'id': 'msg_01Mmfzfs9m4XMgVzsCZYMWqH', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 271, 'output_tokens': 65}}, id='run-44c57f9c-a637-4888-b7d9-6d985031ae48-0', usage_metadata={'input_tokens': 271, 'output_tokens': 65, 'total_tokens': 336})]}}\n",
|
||||
"----\n",
|
||||
"{'agent': {'messages': [AIMessage(content=[{'text': 'To get current weather information for your location in San Francisco, let me invoke the search tool:', 'type': 'text'}, {'id': 'toolu_01BGEyQaSz3pTq8RwUUHSRoo', 'input': {'query': 'san francisco weather'}, 'name': 'tavily_search_results_json', 'type': 'tool_use'}], response_metadata={'id': 'msg_013AVSVsRLKYZjduLpJBY4us', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 347, 'output_tokens': 80}}, id='run-de7923b6-5ee2-4ebe-bd95-5aed4933d0e3-0', tool_calls=[{'name': 'tavily_search_results_json', 'args': {'query': 'san francisco weather'}, 'id': 'toolu_01BGEyQaSz3pTq8RwUUHSRoo'}], usage_metadata={'input_tokens': 347, 'output_tokens': 80, 'total_tokens': 427})]}}\n",
|
||||
"----\n",
|
||||
"{'tools': {'messages': [ToolMessage(content='[{\"url\": \"https://www.weatherapi.com/\", \"content\": \"{\\'location\\': {\\'name\\': \\'San Francisco\\', \\'region\\': \\'California\\', \\'country\\': \\'United States of America\\', \\'lat\\': 37.78, \\'lon\\': -122.42, \\'tz_id\\': \\'America/Los_Angeles\\', \\'localtime_epoch\\': 1717238643, \\'localtime\\': \\'2024-06-01 3:44\\'}, \\'current\\': {\\'last_updated_epoch\\': 1717237800, \\'last_updated\\': \\'2024-06-01 03:30\\', \\'temp_c\\': 12.0, \\'temp_f\\': 53.6, \\'is_day\\': 0, \\'condition\\': {\\'text\\': \\'Mist\\', \\'icon\\': \\'//cdn.weatherapi.com/weather/64x64/night/143.png\\', \\'code\\': 1030}, \\'wind_mph\\': 5.6, \\'wind_kph\\': 9.0, \\'wind_degree\\': 310, \\'wind_dir\\': \\'NW\\', \\'pressure_mb\\': 1013.0, \\'pressure_in\\': 29.92, \\'precip_mm\\': 0.0, \\'precip_in\\': 0.0, \\'humidity\\': 88, \\'cloud\\': 100, \\'feelslike_c\\': 10.5, \\'feelslike_f\\': 50.8, \\'windchill_c\\': 9.3, \\'windchill_f\\': 48.7, \\'heatindex_c\\': 11.1, \\'heatindex_f\\': 51.9, \\'dewpoint_c\\': 8.8, \\'dewpoint_f\\': 47.8, \\'vis_km\\': 6.4, \\'vis_miles\\': 3.0, \\'uv\\': 1.0, \\'gust_mph\\': 12.5, \\'gust_kph\\': 20.1}}\"}, {\"url\": \"https://www.timeanddate.com/weather/usa/san-francisco/historic\", \"content\": \"Past Weather in San Francisco, California, USA \\\\u2014 Yesterday and Last 2 Weeks. Time/General. Weather. Time Zone. DST Changes. Sun & Moon. Weather Today Weather Hourly 14 Day Forecast Yesterday/Past Weather Climate (Averages) Currently: 68 \\\\u00b0F. Passing clouds.\"}]', name='tavily_search_results_json', tool_call_id='toolu_01BGEyQaSz3pTq8RwUUHSRoo')]}}\n",
|
||||
"----\n",
|
||||
"{'agent': {'messages': [AIMessage(content='Based on the search results, the current weather in San Francisco is:\\n\\nTemperature: 53.6°F (12°C)\\nConditions: Misty\\nWind: 5.6 mph (9 kph) from the Northwest\\nHumidity: 88%\\nCloud Cover: 100% \\n\\nThe results provide detailed information like wind chill, heat index, visibility and more. It looks like a typical cool, foggy morning in San Francisco. Let me know if you need any other details about the weather where you live!', response_metadata={'id': 'msg_019WGLbaojuNdbCnqac7zaGW', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 1035, 'output_tokens': 120}}, id='run-1bb68bf3-b212-4ef4-8a31-10c830421c78-0', usage_metadata={'input_tokens': 1035, 'output_tokens': 120, 'total_tokens': 1155})]}}\n",
|
||||
"----\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Import relevant functionality\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"from langchain_community.tools.tavily_search import TavilySearchResults\n",
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"from langgraph.checkpoint.sqlite import SqliteSaver\n",
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"# Create the agent\n",
|
||||
"memory = SqliteSaver.from_conn_string(\":memory:\")\n",
|
||||
"model = ChatAnthropic(model_name=\"claude-3-sonnet-20240229\")\n",
|
||||
"search = TavilySearchResults(max_results=2)\n",
|
||||
"tools = [search]\n",
|
||||
"agent_executor = create_react_agent(model, tools, checkpointer=memory)\n",
|
||||
"\n",
|
||||
"# Use the agent\n",
|
||||
"config = {\"configurable\": {\"thread_id\": \"abc123\"}}\n",
|
||||
"for chunk in agent_executor.stream(\n",
|
||||
" {\"messages\": [HumanMessage(content=\"hi im bob! and i live in sf\")]}, config\n",
|
||||
"):\n",
|
||||
" print(chunk)\n",
|
||||
" print(\"----\")\n",
|
||||
"\n",
|
||||
"for chunk in agent_executor.stream(\n",
|
||||
" {\"messages\": [HumanMessage(content=\"whats the weather where I live?\")]}, config\n",
|
||||
"):\n",
|
||||
" print(chunk)\n",
|
||||
" print(\"----\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f4c03f40-1328-412d-8a48-1db0cd481b77",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Build an Agent\n",
|
||||
"\n",
|
||||
"By themselves, language models can't take actions - they just output text.\n",
|
||||
"A big use case for LangChain is creating **agents**.\n",
|
||||
"Agents are systems that use an LLM as a reasoning enginer to determine which actions to take and what the inputs to those actions should be.\n",
|
||||
"The results of those actions can then be fed back into the agent and it determine whether more actions are needed, or whether it is okay to finish.\n",
|
||||
"\n",
|
||||
"In this tutorial we will build an agent that can interact with multiple different tools: one being a local database, the other being a search engine. You will be able to ask this agent questions, watch it call tools, and have conversations with it.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Concepts\n",
|
||||
"\n",
|
||||
"Concepts we will cover are:\n",
|
||||
"- Using [language models](/docs/concepts/#chat-models), in particular their tool calling ability\n",
|
||||
"- Creating a [Retriever](/docs/concepts/#retrievers) to expose specific information to our agent\n",
|
||||
"- Using a Search [Tool](/docs/concepts/#tools) to look up things online\n",
|
||||
"- Using [LangGraph Agents](/docs/concepts/#agents) which use an LLM to think about what to do and then execute upon that\n",
|
||||
"- Debugging and tracing your application using [LangSmith](/docs/concepts/#langsmith)\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"### Jupyter Notebook\n",
|
||||
"\n",
|
||||
"This guide (and most of the other guides in the documentation) uses [Jupyter notebooks](https://jupyter.org/) and assumes the reader is as well. Jupyter notebooks are perfect for learning how to work with LLM systems because oftentimes things can go wrong (unexpected output, API down, etc) and going through guides in an interactive environment is a great way to better understand them.\n",
|
||||
"This guide (and most of the other guides in the documentation) uses [Jupyter notebooks](https://jupyter.org/) and assumes the reader is as well. Jupyter notebooks are perfect interactive environments for learning how to work with LLM systems because oftentimes things can go wrong (unexpected output, API down, etc), and observing these cases is a great way to better understand building with LLMs.\n",
|
||||
"\n",
|
||||
"This and other tutorials are perhaps most conveniently run in a Jupyter notebook. See [here](https://jupyter.org/install) for instructions on how to install.\n",
|
||||
"\n",
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"To install LangChain run:\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
"import Tabs from '@theme/Tabs';\n",
|
||||
"import TabItem from '@theme/TabItem';\n",
|
||||
"import CodeBlock from \"@theme/CodeBlock\";\n",
|
||||
"\n",
|
||||
"<Tabs>\n",
|
||||
" <TabItem value=\"pip\" label=\"Pip\" default>\n",
|
||||
" <CodeBlock language=\"bash\">pip install langchain</CodeBlock>\n",
|
||||
" </TabItem>\n",
|
||||
" <TabItem value=\"conda\" label=\"Conda\">\n",
|
||||
" <CodeBlock language=\"bash\">conda install langchain -c conda-forge</CodeBlock>\n",
|
||||
" </TabItem>\n",
|
||||
"</Tabs>\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"To install LangChain run:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "60bb3eb1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -U langchain-community langgraph langchain-anthropic tavily-python"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2ee337ae",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For more details, see our [Installation guide](/docs/how_to/installation).\n",
|
||||
"\n",
|
||||
"### LangSmith\n",
|
||||
@@ -86,7 +152,25 @@
|
||||
"\n",
|
||||
"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
|
||||
"os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()\n",
|
||||
"```\n"
|
||||
"```\n",
|
||||
"\n",
|
||||
"### Tavily\n",
|
||||
"\n",
|
||||
"We will be using [Tavily](/docs/integrations/tools/tavily_search) (a search engine) as a tool.\n",
|
||||
"In order to use it, you will need to get and set an API key:\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"export TAVILY_API_KEY=\"...\"\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Or, if in a notebook, you can set it with:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"TAVILY_API_KEY\"] = getpass.getpass()\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -96,18 +180,7 @@
|
||||
"source": [
|
||||
"## Define tools\n",
|
||||
"\n",
|
||||
"We first need to create the tools we want to use. We will use two tools: [Tavily](/docs/integrations/tools/tavily_search) (to search online) and then a retriever over a local index we will create\n",
|
||||
"\n",
|
||||
"### [Tavily](/docs/integrations/tools/tavily_search)\n",
|
||||
"\n",
|
||||
"We have a built-in tool in LangChain to easily use Tavily search engine as tool.\n",
|
||||
"Note that this requires an API key - they have a free tier, but if you don't have one or don't want to create one, you can always ignore this step.\n",
|
||||
"\n",
|
||||
"Once you create your API key, you will need to export that as:\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"export TAVILY_API_KEY=\"...\"\n",
|
||||
"```"
|
||||
"We first need to create the tools we want to use. Our main tool of choice will be [Tavily](/docs/integrations/tools/tavily_search) - a search engine. We have a built-in tool in LangChain to easily use Tavily search engine as tool.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -115,147 +188,30 @@
|
||||
"execution_count": 2,
|
||||
"id": "482ce13d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.tools.tavily_search import TavilySearchResults"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "9cc86c0b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"search = TavilySearchResults(max_results=2)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "e593bbf6",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'url': 'https://weather.com/weather/tenday/l/San Francisco CA USCA0987:1:US',\n",
|
||||
" 'content': \"Comfy & Cozy\\nThat's Not What Was Expected\\nOutside\\n'No-Name Storms' In Florida\\nGifts From On High\\nWhat To Do For Wheezing\\nSurviving The Season\\nStay Safe\\nAir Quality Index\\nAir quality is considered satisfactory, and air pollution poses little or no risk.\\n Health & Activities\\nSeasonal Allergies and Pollen Count Forecast\\nNo pollen detected in your area\\nCold & Flu Forecast\\nFlu risk is low in your area\\nWe recognize our responsibility to use data and technology for good. recents\\nSpecialty Forecasts\\n10 Day Weather-San Francisco, CA\\nToday\\nMon 18 | Day\\nConsiderable cloudiness. Tue 19\\nTue 19 | Day\\nLight rain early...then remaining cloudy with showers in the afternoon. Wed 27\\nWed 27 | Day\\nOvercast with rain showers at times.\"},\n",
|
||||
" {'url': 'https://www.accuweather.com/en/us/san-francisco/94103/hourly-weather-forecast/347629',\n",
|
||||
" 'content': 'Hourly weather forecast in San Francisco, CA. Check current conditions in San Francisco, CA with radar, hourly, and more.'}]"
|
||||
"[{'url': 'https://www.weatherapi.com/',\n",
|
||||
" 'content': \"{'location': {'name': 'San Francisco', 'region': 'California', 'country': 'United States of America', 'lat': 37.78, 'lon': -122.42, 'tz_id': 'America/Los_Angeles', 'localtime_epoch': 1717238703, 'localtime': '2024-06-01 3:45'}, 'current': {'last_updated_epoch': 1717237800, 'last_updated': '2024-06-01 03:30', 'temp_c': 12.0, 'temp_f': 53.6, 'is_day': 0, 'condition': {'text': 'Mist', 'icon': '//cdn.weatherapi.com/weather/64x64/night/143.png', 'code': 1030}, 'wind_mph': 5.6, 'wind_kph': 9.0, 'wind_degree': 310, 'wind_dir': 'NW', 'pressure_mb': 1013.0, 'pressure_in': 29.92, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 88, 'cloud': 100, 'feelslike_c': 10.5, 'feelslike_f': 50.8, 'windchill_c': 9.3, 'windchill_f': 48.7, 'heatindex_c': 11.1, 'heatindex_f': 51.9, 'dewpoint_c': 8.8, 'dewpoint_f': 47.8, 'vis_km': 6.4, 'vis_miles': 3.0, 'uv': 1.0, 'gust_mph': 12.5, 'gust_kph': 20.1}}\"},\n",
|
||||
" {'url': 'https://www.wunderground.com/hourly/us/ca/san-francisco/date/2024-01-06',\n",
|
||||
" 'content': 'Current Weather for Popular Cities . San Francisco, CA 58 ° F Partly Cloudy; Manhattan, NY warning 51 ° F Cloudy; Schiller Park, IL (60176) warning 51 ° F Fair; Boston, MA warning 41 ° F ...'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"search.invoke(\"what is the weather in SF\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e8097977",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Retriever\n",
|
||||
"from langchain_community.tools.tavily_search import TavilySearchResults\n",
|
||||
"\n",
|
||||
"We will also create a retriever over some data of our own. For a deeper explanation of each step here, see [this tutorial](/docs/tutorials/rag)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "9c9ce713",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import WebBaseLoader\n",
   "from langchain_community.vectorstores import FAISS\n",
   "from langchain_openai import OpenAIEmbeddings\n",
   "from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
   "\n",
   "loader = WebBaseLoader(\"https://docs.smith.langchain.com/overview\")\n",
   "docs = loader.load()\n",
   "documents = RecursiveCharacterTextSplitter(\n",
   " chunk_size=1000, chunk_overlap=200\n",
   ").split_documents(docs)\n",
   "vector = FAISS.from_documents(documents, OpenAIEmbeddings())\n",
   "retriever = vector.as_retriever()"
]
 },
 {
  "cell_type": "code",
  "execution_count": 6,
  "id": "dae53ec6",
  "metadata": {},
  "outputs": [
   {
    "data": {
     "text/plain": [
"Document(page_content='import Clientfrom langsmith.evaluation import evaluateclient = Client()# Define dataset: these are your test casesdataset_name = \"Sample Dataset\"dataset = client.create_dataset(dataset_name, description=\"A sample dataset in LangSmith.\")client.create_examples( inputs=[ {\"postfix\": \"to LangSmith\"}, {\"postfix\": \"to Evaluations in LangSmith\"}, ], outputs=[ {\"output\": \"Welcome to LangSmith\"}, {\"output\": \"Welcome to Evaluations in LangSmith\"}, ], dataset_id=dataset.id,)# Define your evaluatordef exact_match(run, example): return {\"score\": run.outputs[\"output\"] == example.outputs[\"output\"]}experiment_results = evaluate( lambda input: \"Welcome \" + input[\\'postfix\\'], # Your AI system goes here data=dataset_name, # The data to predict and grade over evaluators=[exact_match], # The evaluators to score the results experiment_prefix=\"sample-experiment\", # The name of the experiment metadata={ \"version\": \"1.0.0\", \"revision_id\":', metadata={'source': 'https://docs.smith.langchain.com/overview', 'title': 'Getting started with LangSmith | 🦜️🛠️ LangSmith', 'description': 'Introduction', 'language': 'en'})"
]
   },
   "execution_count": 6,
   "metadata": {},
   "output_type": "execute_result"
  }
  ],
  "source": [
   "retriever.invoke(\"how to upload a dataset\")[0]"
  ]
 },
 {
  "cell_type": "markdown",
  "id": "04aeca39",
  "metadata": {},
  "source": [
"Now that we have populated our index that we will do doing retrieval over, we can easily turn it into a tool (the format needed for an agent to properly use it)"
  ]
 },
 {
  "cell_type": "code",
  "execution_count": 7,
  "id": "117594b5",
  "metadata": {},
  "outputs": [],
  "source": [
   "from langchain.tools.retriever import create_retriever_tool"
  ]
 },
 {
  "cell_type": "code",
  "execution_count": 8,
  "id": "7280b031",
  "metadata": {},
  "outputs": [],
  "source": [
"retriever_tool = create_retriever_tool(\n",
|
||||
" retriever,\n",
|
||||
" \"langsmith_search\",\n",
|
||||
" \"Search for information about LangSmith. For any questions about LangSmith, you must use this tool!\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c3b47c1d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Tools\n",
|
||||
"\n",
|
||||
"Now that we have created both, we can create a list of tools that we will use downstream."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "b8e8e710",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tools = [search, retriever_tool]"
|
||||
"search = TavilySearchResults(max_results=2)\n",
|
||||
"search_results = search.invoke(\"what is the weather in SF\")\n",
|
||||
"print(search_results)\n",
|
||||
"# If we want, we can create other tools.\n",
|
||||
"# Once we have all the tools we want, we can put them in a list that we will reference later.\n",
|
||||
"tools = [search]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -276,7 +232,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": 3,
|
||||
"id": "69185491",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -284,9 +240,9 @@
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI(model=\"gpt-4\")"
|
||||
"model = ChatAnthropic(model=\"claude-3-sonnet-20240229\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -299,17 +255,17 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"execution_count": 4,
|
||||
"id": "c96c960b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Hello! How can I assist you today?'"
|
||||
"'Hi there!'"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -331,7 +287,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"execution_count": 5,
|
||||
"id": "ba692a74",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -349,7 +305,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"execution_count": 6,
|
||||
"id": "b6a7e925",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -357,7 +313,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"ContentString: Hello! How can I assist you today?\n",
|
||||
"ContentString: Hello!\n",
|
||||
"ToolCalls: []\n"
|
||||
]
|
||||
}
|
||||
@@ -379,7 +335,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"execution_count": 7,
|
||||
"id": "688b465d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -387,8 +343,8 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"ContentString: \n",
|
||||
"ToolCalls: [{'name': 'tavily_search_results_json', 'args': {'query': 'current weather in SF'}, 'id': 'call_nfE1XbCqZ8eJsB8rNdn4MQZQ'}]\n"
|
||||
"ContentString: [{'id': 'toolu_01VTP7DUvSfgtYxsq9x4EwMp', 'input': {'query': 'weather san francisco'}, 'name': 'tavily_search_results_json', 'type': 'tool_use'}]\n",
|
||||
"ToolCalls: [{'name': 'tavily_search_results_json', 'args': {'query': 'weather san francisco'}, 'id': 'toolu_01VTP7DUvSfgtYxsq9x4EwMp'}]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -404,7 +360,7 @@
|
||||
"id": "83c4bcd3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can see that there's now no content, but there is a tool call! It wants us to call the Tavily Search tool.\n",
|
||||
"We can see that there's now no text content, but there is a tool call! It wants us to call the Tavily Search tool.\n",
|
||||
"\n",
|
||||
"This isn't calling that tool yet - it's just telling us to. In order to actually calll it, we'll want to create our agent."
|
||||
]
|
||||
@@ -432,14 +388,14 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"execution_count": 9,
|
||||
"id": "89cf72b4-6046-4b47-8f27-5522d8cb8036",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langgraph.prebuilt import chat_agent_executor\n",
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"agent_executor = chat_agent_executor.create_tool_calling_executor(model, tools)"
|
||||
"agent_executor = create_react_agent(model, tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -456,18 +412,18 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"execution_count": 10,
|
||||
"id": "114ba50d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content='hi!', id='1535b889-10a5-45d0-a1e1-dd2e60d4bc04'),\n",
|
||||
" AIMessage(content='Hello! How can I assist you today?', response_metadata={'token_usage': {'completion_tokens': 10, 'prompt_tokens': 129, 'total_tokens': 139}, 'model_name': 'gpt-4', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-2c94c074-bdc9-4f01-8fd7-71cfc4777d55-0')]"
|
||||
"[HumanMessage(content='hi!', id='a820fcc5-9b87-457a-9af0-f21768143ee3'),\n",
|
||||
" AIMessage(content='Hello!', response_metadata={'id': 'msg_01VbC493X1VEDyusgttiEr1z', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 264, 'output_tokens': 5}}, id='run-0e0ddae8-a85b-4bd6-947c-c36c857a4698-0', usage_metadata={'input_tokens': 264, 'output_tokens': 5, 'total_tokens': 269})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -485,66 +441,25 @@
|
||||
"source": [
|
||||
"In order to see exactly what is happening under the hood (and to make sure it's not calling a tool) we can take a look at the [LangSmith trace](https://smith.langchain.com/public/28311faa-e135-4d6a-ab6b-caecf6482aaa/r)\n",
|
||||
"\n",
|
||||
"Let's now try it out on an example where it should be invoking the retriever"
|
||||
"Let's now try it out on an example where it should be invoking the tool"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "3fa4780a",
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content='how can langsmith help with testing?', id='04f4fe8f-391a-427c-88af-1fa064db304c'),\n",
|
||||
" AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_FNIgdO97wo51sKx3XZOGLHqT', 'function': {'arguments': '{\\n \"query\": \"how can LangSmith help with testing\"\\n}', 'name': 'langsmith_search'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 22, 'prompt_tokens': 135, 'total_tokens': 157}, 'model_name': 'gpt-4', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-51f6ea92-84e1-43a5-b1f2-bc0c12d8613f-0', tool_calls=[{'name': 'langsmith_search', 'args': {'query': 'how can LangSmith help with testing'}, 'id': 'call_FNIgdO97wo51sKx3XZOGLHqT'}]),\n",
|
||||
" ToolMessage(content=\"Getting started with LangSmith | 🦜️🛠️ LangSmith\\n\\nSkip to main contentLangSmith API DocsSearchGo to AppQuick StartUser GuideTracingEvaluationProduction Monitoring & AutomationsPrompt HubProxyPricingSelf-HostingCookbookQuick StartOn this pageGetting started with LangSmithIntroduction\\u200bLangSmith is a platform for building production-grade LLM applications. It allows you to closely monitor and evaluate your application, so you can ship quickly and with confidence. Use of LangChain is not necessary - LangSmith works on its own!Install LangSmith\\u200bWe offer Python and Typescript SDKs for all your LangSmith needs.PythonTypeScriptpip install -U langsmithyarn add langchain langsmithCreate an API key\\u200bTo create an API key head to the setting pages. Then click Create API Key.Setup your environment\\u200bShellexport LANGCHAIN_TRACING_V2=trueexport LANGCHAIN_API_KEY=<your-api-key># The below examples use the OpenAI API, though it's not necessary in generalexport OPENAI_API_KEY=<your-openai-api-key>Log your first trace\\u200bWe provide multiple ways to log traces\\n\\nLearn about the workflows LangSmith supports at each stage of the LLM application lifecycle.Pricing: Learn about the pricing model for LangSmith.Self-Hosting: Learn about self-hosting options for LangSmith.Proxy: Learn about the proxy capabilities of LangSmith.Tracing: Learn about the tracing capabilities of LangSmith.Evaluation: Learn about the evaluation capabilities of LangSmith.Prompt Hub Learn about the Prompt Hub, a prompt management tool built into LangSmith.Additional Resources\\u200bLangSmith Cookbook: A collection of tutorials and end-to-end walkthroughs using LangSmith.LangChain Python: Docs for the Python LangChain library.LangChain Python API Reference: documentation to review the core APIs of LangChain.LangChain JS: Docs for the TypeScript LangChain libraryDiscord: Join us on our Discord to discuss all things LangChain!FAQ\\u200bHow do I migrate projects between organizations?\\u200bCurrently we do not support project migration betwen organizations. While you can manually imitate this by\\n\\nteam deals with sensitive data that cannot be logged. How can I ensure that only my team can access it?\\u200bIf you are interested in a private deployment of LangSmith or if you need to self-host, please reach out to us at sales@langchain.dev. Self-hosting LangSmith requires an annual enterprise license that also comes with support and formalized access to the LangChain team.Was this page helpful?NextUser GuideIntroductionInstall LangSmithCreate an API keySetup your environmentLog your first traceCreate your first evaluationNext StepsAdditional ResourcesFAQHow do I migrate projects between organizations?Why aren't my runs aren't showing up in my project?My team deals with sensitive data that cannot be logged. How can I ensure that only my team can access it?CommunityDiscordTwitterGitHubDocs CodeLangSmith SDKPythonJS/TSMoreHomepageBlogLangChain Python DocsLangChain JS/TS DocsCopyright © 2024 LangChain, Inc.\", name='langsmith_search', id='f286c7e7-6514-4621-ac60-e4079b37ebe2', tool_call_id='call_FNIgdO97wo51sKx3XZOGLHqT'),\n",
|
||||
" AIMessage(content=\"LangSmith is a platform that can significantly aid in testing by offering several features:\\n\\n1. **Tracing**: LangSmith provides robust tracing capabilities that enable you to monitor your application closely. This feature is particularly useful for tracking the behavior of your application and identifying any potential issues.\\n\\n2. **Evaluation**: LangSmith allows you to perform comprehensive evaluations of your application. This can help you assess the performance of your application under various conditions and make necessary adjustments to enhance its functionality.\\n\\n3. **Production Monitoring & Automations**: With LangSmith, you can keep a close eye on your application when it's in active use. The platform provides tools for automatic monitoring and managing routine tasks, helping to ensure your application runs smoothly.\\n\\n4. **Prompt Hub**: It's a prompt management tool built into LangSmith. This feature can be instrumental when testing various prompts in your application.\\n\\nOverall, LangSmith helps you build production-grade LLM applications with confidence, providing necessary tools for monitoring, evaluation, and automation.\", response_metadata={'token_usage': {'completion_tokens': 200, 'prompt_tokens': 782, 'total_tokens': 982}, 'model_name': 'gpt-4', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-4b80db7e-9a26-4043-8b6b-922f847f9c80-0')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"response = agent_executor.invoke(\n",
|
||||
" {\"messages\": [HumanMessage(content=\"how can langsmith help with testing?\")]}\n",
|
||||
")\n",
|
||||
"response[\"messages\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f2d94242",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's take a look at the [LangSmith trace](https://smith.langchain.com/public/853f62d0-3421-4dba-b30a-7277ce2bdcdf/r) to see what is going on under the hood.\n",
|
||||
"\n",
|
||||
"Note that the state we get back at the end also contains the tool call and the tool response message.\n",
|
||||
"\n",
|
||||
"Now let's try one where it needs to call the search tool:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"execution_count": 11,
|
||||
"id": "77c2f769",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content='whats the weather in sf?', id='e6b716e6-da57-41de-a227-fee281fda588'),\n",
|
||||
" AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_TGDKm0saxuGKJD5OYOXWRvLe', 'function': {'arguments': '{\\n \"query\": \"current weather in San Francisco\"\\n}', 'name': 'tavily_search_results_json'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 23, 'prompt_tokens': 134, 'total_tokens': 157}, 'model_name': 'gpt-4', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-fd7d5854-2eab-4fca-ad9e-b3de8d587614-0', tool_calls=[{'name': 'tavily_search_results_json', 'args': {'query': 'current weather in San Francisco'}, 'id': 'call_TGDKm0saxuGKJD5OYOXWRvLe'}]),\n",
|
||||
" ToolMessage(content='[{\"url\": \"https://www.weatherapi.com/\", \"content\": \"{\\'location\\': {\\'name\\': \\'San Francisco\\', \\'region\\': \\'California\\', \\'country\\': \\'United States of America\\', \\'lat\\': 37.78, \\'lon\\': -122.42, \\'tz_id\\': \\'America/Los_Angeles\\', \\'localtime_epoch\\': 1714426800, \\'localtime\\': \\'2024-04-29 14:40\\'}, \\'current\\': {\\'last_updated_epoch\\': 1714426200, \\'last_updated\\': \\'2024-04-29 14:30\\', \\'temp_c\\': 17.8, \\'temp_f\\': 64.0, \\'is_day\\': 1, \\'condition\\': {\\'text\\': \\'Sunny\\', \\'icon\\': \\'//cdn.weatherapi.com/weather/64x64/day/113.png\\', \\'code\\': 1000}, \\'wind_mph\\': 23.0, \\'wind_kph\\': 37.1, \\'wind_degree\\': 290, \\'wind_dir\\': \\'WNW\\', \\'pressure_mb\\': 1019.0, \\'pressure_in\\': 30.09, \\'precip_mm\\': 0.0, \\'precip_in\\': 0.0, \\'humidity\\': 50, \\'cloud\\': 0, \\'feelslike_c\\': 17.8, \\'feelslike_f\\': 64.0, \\'vis_km\\': 16.0, \\'vis_miles\\': 9.0, \\'uv\\': 5.0, \\'gust_mph\\': 27.5, \\'gust_kph\\': 44.3}}\"}, {\"url\": \"https://www.wunderground.com/hourly/us/ca/san-francisco/94125/date/2024-4-29\", \"content\": \"Current Weather for Popular Cities . San Francisco, CA warning 59 \\\\u00b0 F Mostly Cloudy; Manhattan, NY 56 \\\\u00b0 F Fair; Schiller Park, IL (60176) warning 58 \\\\u00b0 F Mostly Cloudy; Boston, MA 52 \\\\u00b0 F Sunny ...\"}]', name='tavily_search_results_json', id='aa0d8c3d-23b5-425a-ad05-3c174fc04892', tool_call_id='call_TGDKm0saxuGKJD5OYOXWRvLe'),\n",
|
||||
" AIMessage(content='The current weather in San Francisco, California is sunny with a temperature of 64.0°F (17.8°C). The wind is coming from the WNW at a speed of 23.0 mph. The humidity level is at 50%. There is no precipitation and the cloud cover is 0%. The visibility is 16.0 km. The UV index is 5.0. Please note that this information is as of 14:30 on April 29, 2024, according to [Weather API](https://www.weatherapi.com/).', response_metadata={'token_usage': {'completion_tokens': 117, 'prompt_tokens': 620, 'total_tokens': 737}, 'model_name': 'gpt-4', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-2359b41b-cab6-40c3-b6d9-7bdf7195a601-0')]"
|
||||
"[HumanMessage(content='whats the weather in sf?', id='1d6c96bb-4ddb-415c-a579-a07d5264de0d'),\n",
|
||||
" AIMessage(content=[{'id': 'toolu_01Y5EK4bw2LqsQXeaUv8iueF', 'input': {'query': 'weather in san francisco'}, 'name': 'tavily_search_results_json', 'type': 'tool_use'}], response_metadata={'id': 'msg_0132wQUcEduJ8UKVVVqwJzM4', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 269, 'output_tokens': 61}}, id='run-26d5e5e8-d4fd-46d2-a197-87b95b10e823-0', tool_calls=[{'name': 'tavily_search_results_json', 'args': {'query': 'weather in san francisco'}, 'id': 'toolu_01Y5EK4bw2LqsQXeaUv8iueF'}], usage_metadata={'input_tokens': 269, 'output_tokens': 61, 'total_tokens': 330}),\n",
|
||||
" ToolMessage(content='[{\"url\": \"https://www.weatherapi.com/\", \"content\": \"{\\'location\\': {\\'name\\': \\'San Francisco\\', \\'region\\': \\'California\\', \\'country\\': \\'United States of America\\', \\'lat\\': 37.78, \\'lon\\': -122.42, \\'tz_id\\': \\'America/Los_Angeles\\', \\'localtime_epoch\\': 1717238703, \\'localtime\\': \\'2024-06-01 3:45\\'}, \\'current\\': {\\'last_updated_epoch\\': 1717237800, \\'last_updated\\': \\'2024-06-01 03:30\\', \\'temp_c\\': 12.0, \\'temp_f\\': 53.6, \\'is_day\\': 0, \\'condition\\': {\\'text\\': \\'Mist\\', \\'icon\\': \\'//cdn.weatherapi.com/weather/64x64/night/143.png\\', \\'code\\': 1030}, \\'wind_mph\\': 5.6, \\'wind_kph\\': 9.0, \\'wind_degree\\': 310, \\'wind_dir\\': \\'NW\\', \\'pressure_mb\\': 1013.0, \\'pressure_in\\': 29.92, \\'precip_mm\\': 0.0, \\'precip_in\\': 0.0, \\'humidity\\': 88, \\'cloud\\': 100, \\'feelslike_c\\': 10.5, \\'feelslike_f\\': 50.8, \\'windchill_c\\': 9.3, \\'windchill_f\\': 48.7, \\'heatindex_c\\': 11.1, \\'heatindex_f\\': 51.9, \\'dewpoint_c\\': 8.8, \\'dewpoint_f\\': 47.8, \\'vis_km\\': 6.4, \\'vis_miles\\': 3.0, \\'uv\\': 1.0, \\'gust_mph\\': 12.5, \\'gust_kph\\': 20.1}}\"}, {\"url\": \"https://www.timeanddate.com/weather/usa/san-francisco/hourly\", \"content\": \"Sun & Moon. Weather Today Weather Hourly 14 Day Forecast Yesterday/Past Weather Climate (Averages) Currently: 59 \\\\u00b0F. Passing clouds. (Weather station: San Francisco International Airport, USA). See more current weather.\"}]', name='tavily_search_results_json', id='37aa1fd9-b232-4a02-bd22-bc5b9b44a22c', tool_call_id='toolu_01Y5EK4bw2LqsQXeaUv8iueF'),\n",
|
||||
" AIMessage(content='Based on the search results, here is a summary of the current weather in San Francisco:\\n\\nThe weather in San Francisco is currently misty with a temperature of around 53°F (12°C). There is complete cloud cover and moderate winds from the northwest around 5-9 mph (9-14 km/h). Humidity is high at 88%. Visibility is around 3 miles (6.4 km). \\n\\nThe results provide an hourly forecast as well as current conditions from a couple different weather sources. Let me know if you need any additional details about the San Francisco weather!', response_metadata={'id': 'msg_01BRX9mrT19nBDdHYtR7wJ92', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 920, 'output_tokens': 132}}, id='run-d0325583-3ddc-4432-b2b2-d023eb97660f-0', usage_metadata={'input_tokens': 920, 'output_tokens': 132, 'total_tokens': 1052})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 18,
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -576,7 +491,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"execution_count": null,
|
||||
"id": "532d6557",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -618,7 +533,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"execution_count": null,
|
||||
"id": "a3fb262c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -686,7 +601,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"execution_count": null,
|
||||
"id": "c4073e35",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -698,21 +613,19 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 25,
|
||||
"execution_count": 12,
|
||||
"id": "e64a944e-f9ac-43cf-903c-d3d28d765377",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent_executor = chat_agent_executor.create_tool_calling_executor(\n",
|
||||
" model, tools, checkpointer=memory\n",
|
||||
")\n",
|
||||
"agent_executor = create_react_agent(model, tools, checkpointer=memory)\n",
|
||||
"\n",
|
||||
"config = {\"configurable\": {\"thread_id\": \"abc123\"}}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"execution_count": 13,
|
||||
"id": "a13462d0-2d02-4474-921e-15a1ba1fa274",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -720,7 +633,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'agent': {'messages': [AIMessage(content='Hello Bob! How can I assist you today?', response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 131, 'total_tokens': 142}, 'model_name': 'gpt-4', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-607733e3-4b8d-4137-ae66-8a4b8ccc8d40-0')]}}\n",
|
||||
"{'agent': {'messages': [AIMessage(content=\"Hello Bob! It's nice to meet you again.\", response_metadata={'id': 'msg_013C1z2ZySagEFwmU1EsysR2', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 1162, 'output_tokens': 14}}, id='run-f878acfd-d195-44e8-9166-e2796317e3f8-0', usage_metadata={'input_tokens': 1162, 'output_tokens': 14, 'total_tokens': 1176})]}}\n",
|
||||
"----\n"
|
||||
]
|
||||
}
|
||||
@@ -735,7 +648,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"execution_count": 14,
|
||||
"id": "56d8028b-5dbc-40b2-86f5-ed60631d86a3",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -743,7 +656,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'agent': {'messages': [AIMessage(content='Your name is Bob. How can I assist you further?', response_metadata={'token_usage': {'completion_tokens': 13, 'prompt_tokens': 154, 'total_tokens': 167}, 'model_name': 'gpt-4', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-e1181ba6-732d-4564-b479-9f1ab6bf01f6-0')]}}\n",
|
||||
"{'agent': {'messages': [AIMessage(content='You mentioned your name is Bob when you introduced yourself earlier. So your name is Bob.', response_metadata={'id': 'msg_01WNwnRNGwGDRw6vRdivt6i1', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 1184, 'output_tokens': 21}}, id='run-f5c0b957-8878-405a-9d4b-a7cd38efe81f-0', usage_metadata={'input_tokens': 1184, 'output_tokens': 21, 'total_tokens': 1205})]}}\n",
|
||||
"----\n"
|
||||
]
|
||||
}
|
||||
@@ -764,6 +677,38 @@
|
||||
"Example [LangSmith trace](https://smith.langchain.com/public/fa73960b-0f7d-4910-b73d-757a12f33b2b/r)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ae908088",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If I want to start a new conversation, all I have to do is change the `thread_id` used"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "24460239",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'agent': {'messages': [AIMessage(content=\"I'm afraid I don't actually know your name. As an AI assistant without personal information about you, I don't have a specific name associated with our conversation.\", response_metadata={'id': 'msg_01NoaXNNYZKSoBncPcLkdcbo', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 267, 'output_tokens': 36}}, id='run-c9f7df3d-525a-4d8f-bbcf-a5b4a5d2e4b0-0', usage_metadata={'input_tokens': 267, 'output_tokens': 36, 'total_tokens': 303})]}}\n",
|
||||
"----\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"config = {\"configurable\": {\"thread_id\": \"xyz123\"}}\n",
|
||||
"for chunk in agent_executor.stream(\n",
|
||||
" {\"messages\": [HumanMessage(content=\"whats my name?\")]}, config\n",
|
||||
"):\n",
|
||||
" print(chunk)\n",
|
||||
" print(\"----\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c029798f",
|
||||
@@ -804,7 +749,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.1"
|
||||
"version": "3.12.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -2,10 +2,15 @@
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "raw"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_position: 1\n",
|
||||
"keywords: [conversationchain]\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -128,7 +128,7 @@
|
||||
" # Having a good description can help improve extraction results.\n",
|
||||
" name: Optional[str] = Field(default=None, description=\"The name of the person\")\n",
|
||||
" hair_color: Optional[str] = Field(\n",
|
||||
" default=None, description=\"The color of the peron's hair if known\"\n",
|
||||
" default=None, description=\"The color of the person's hair if known\"\n",
|
||||
" )\n",
|
||||
" height_in_meters: Optional[str] = Field(\n",
|
||||
" default=None, description=\"Height measured in meters\"\n",
|
||||
@@ -303,7 +303,7 @@
|
||||
" # Having a good description can help improve extraction results.\n",
|
||||
" name: Optional[str] = Field(default=None, description=\"The name of the person\")\n",
|
||||
" hair_color: Optional[str] = Field(\n",
|
||||
" default=None, description=\"The color of the peron's hair if known\"\n",
|
||||
" default=None, description=\"The color of the person's hair if known\"\n",
|
||||
" )\n",
|
||||
" height_in_meters: Optional[str] = Field(\n",
|
||||
" default=None, description=\"Height measured in meters\"\n",
|
||||
|
||||
@@ -7,7 +7,7 @@ sidebar_class_name: hidden
|
||||
New to LangChain or to LLM app development in general? Read this material to quickly get up and running.
|
||||
|
||||
### Basics
|
||||
- [Build a Simple LLM Application](/docs/tutorials/llm_chain)
|
||||
- [Build a Simple LLM Application with LCEL](/docs/tutorials/llm_chain)
|
||||
- [Build a Chatbot](/docs/tutorials/chatbot)
|
||||
- [Build vector stores and retrievers](/docs/tutorials/retrievers)
|
||||
- [Build an Agent](/docs/tutorials/agents)
|
||||
@@ -19,6 +19,7 @@ New to LangChain or to LLM app development in general? Read this material to qui
|
||||
- [Build a Query Analysis System](/docs/tutorials/query_analysis)
|
||||
- [Build a local RAG application](/docs/tutorials/local_rag)
|
||||
- [Build a Question Answering application over a Graph Database](/docs/tutorials/graph)
|
||||
- [Build a PDF ingestion and Question/Answering system](/docs/tutorials/pdf_qa/)
|
||||
|
||||
### Specialized tasks
|
||||
- [Build an Extraction Chain](/docs/tutorials/extraction)
|
||||
@@ -26,5 +27,20 @@ New to LangChain or to LLM app development in general? Read this material to qui
|
||||
- [Classify text into labels](/docs/tutorials/classification)
|
||||
- [Summarize text](/docs/tutorials/summarization)
|
||||
|
||||
### LangGraph
|
||||
|
||||
LangGraph is an extension of LangChain aimed at
|
||||
building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
|
||||
|
||||
LangGraph documentation is currently hosted on a separate site.
|
||||
You can peruse [LangGraph tutorials here](https://langchain-ai.github.io/langgraph/tutorials/).
|
||||
|
||||
### LangSmith
|
||||
|
||||
LangSmith allows you to closely trace, monitor and evaluate your LLM application.
|
||||
It seamlessly integrates with LangChain, and you can use it to inspect and debug individual steps of your chains as you build.
|
||||
|
||||
LangSmith documentation is hosted on a separate site.
|
||||
You can peruse [LangSmith tutorials here](https://docs.smith.langchain.com/tutorials/).
|
||||
|
||||
For a longer list of tutorials, see our [cookbook section](https://github.com/langchain-ai/langchain/tree/master/cookbook).
|
||||
|
||||
@@ -15,25 +15,23 @@
|
||||
"id": "9316da0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Build a Simple LLM Application\n",
|
||||
"# Build a Simple LLM Application with LCEL\n",
|
||||
"\n",
|
||||
"In this quickstart we'll show you how to build a simple LLM application. This application will translate text from English into another language. This is a relatively simple LLM application - it's just a single LLM call plus some prompting. Still, this is a great way to get started with LangChain - a lot of features can be built with just some prompting and an LLM call!\n",
|
||||
"In this quickstart we'll show you how to build a simple LLM application with LangChain. This application will translate text from English into another language. This is a relatively simple LLM application - it's just a single LLM call plus some prompting. Still, this is a great way to get started with LangChain - a lot of features can be built with just some prompting and an LLM call!\n",
|
||||
"\n",
|
||||
"## Concepts\n",
|
||||
"\n",
|
||||
"Concepts we will cover are:\n",
|
||||
"After reading this tutorial, you'll have a high level overview of:\n",
|
||||
"\n",
|
||||
"- Using [language models](/docs/concepts/#chat-models)\n",
|
||||
"\n",
|
||||
"- Using [PromptTemplates](/docs/concepts/#prompt-templates) and [OutputParsers](/docs/concepts/#output-parsers)\n",
|
||||
"\n",
|
||||
"- [Chaining](/docs/concepts/#langchain-expression-language) a PromptTemplate + LLM + OutputParser using LangChain\n",
|
||||
"- Using [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language-lcel) to chain components together\n",
|
||||
"\n",
|
||||
"- Debugging and tracing your application using [LangSmith](/docs/concepts/#langsmith)\n",
|
||||
"\n",
|
||||
"- Deploying your application with [LangServe](/docs/concepts/#langserve)\n",
|
||||
"\n",
|
||||
"That's a fair amount to cover! Let's dive in.\n",
|
||||
"Let's dive in!\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
@@ -95,11 +93,6 @@
|
||||
"id": "e5558ca9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Detailed walkthrough\n",
|
||||
"\n",
|
||||
"In this guide we will build an application to translate user input from one language to another.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Using Language Models\n",
|
||||
"\n",
|
||||
"First up, let's learn how to use a language model by itself. LangChain supports many different language models that you can use interchangably - select the one you want to use below!\n",
|
||||
@@ -413,7 +406,9 @@
|
||||
"id": "5a4267a8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can now combine this with the model and the output parser from above. This will chain all three components together."
|
||||
"## Chaining together components with LCEL\n",
|
||||
"\n",
|
||||
"We can now combine this with the model and the output parser from above using the pipe (`|`) operator:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -452,7 +447,9 @@
|
||||
"id": "0b19cecb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If we take a look at the LangSmith trace, we can see all three components show up in the [LangSmith trace](https://smith.langchain.com/public/bc49bec0-6b13-4726-967f-dbd3448b786d/r)"
|
||||
"This is a simple example of using [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language-lcel) to chain together LangChain modules. There are several benefits to this approach, including optimized streaming and tracing support.\n",
|
||||
"\n",
|
||||
"If we take a look at the LangSmith trace, we can see all three components show up in the [LangSmith trace](https://smith.langchain.com/public/bc49bec0-6b13-4726-967f-dbd3448b786d/r)."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -584,15 +581,23 @@
|
||||
"source": [
|
||||
"## Conclusion\n",
|
||||
"\n",
|
||||
"That's it! In this tutorial we've walked through creating our first simple LLM application. We've learned how to work with language models, how to parse their outputs, how to create a prompt template, how to get great observability into chains you create with LangSmith, and how to deploy them with LangServe.\n",
"That's it! In this tutorial you've learned how to create your first simple LLM application. You've learned how to work with language models, how to parse their outputs, how to create a prompt template, chaining them with LCEL, how to get great observability into chains you create with LangSmith, and how to deploy them with LangServe.\n",
|
||||
"\n",
|
||||
"This just scratches the surface of what you will want to learn to become a proficient AI Engineer. Luckily - we've got a lot of other resources!\n",
|
||||
"\n",
"For more in-depth tutorials, check out out [Tutorials](/docs/tutorials) section.\n",
|
||||
"For further reading on the core concepts of LangChain, we've got detailed [Conceptual Guides](/docs/concepts).\n",
|
||||
"\n",
|
||||
"If you have specific questions on how to accomplish particular tasks, see our [How-To Guides](/docs/how_to) section.\n",
|
||||
"If you have more specific questions on these concepts, check out the following sections of the how-to guides:\n",
|
||||
"\n",
|
||||
"For reading up on the core concepts of LangChain, we've got detailed [Conceptual Guides](/docs/concepts)"
|
||||
"- [LangChain Expression Language (LCEL)](/docs/how_to/#langchain-expression-language-lcel)\n",
|
||||
"- [Prompt templates](/docs/how_to/#prompt-templates)\n",
|
||||
"- [Chat models](/docs/how_to/#chat-models)\n",
|
||||
"- [Output parsers](/docs/how_to/#output-parsers)\n",
|
||||
"- [LangServe](/docs/langserve/)\n",
|
||||
"\n",
|
||||
"And the LangSmith docs:\n",
|
||||
"\n",
|
||||
"- [LangSmith](https://docs.smith.langchain.com)"
|
||||
]
|
||||
},
|
||||
{
docs/docs/tutorials/pdf_qa.ipynb (339 lines, new file)
@@ -0,0 +1,339 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "raw"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"---\n",
|
||||
"keywords: [pdf, document loader]\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Build a PDF ingestion and Question/Answering system\n",
|
||||
"\n",
|
||||
"PDF files often hold crucial unstructured data unavailable from other sources. They can be quite lengthy, and unlike plain text files, cannot generally be fed directly into the prompt of a language model.\n",
|
||||
"\n",
|
||||
"In this tutorial, you'll create a system that can answer questions about PDF files. More specifically, you'll use a [Document Loader](/docs/concepts/#document-loaders) to load text in a format usable by an LLM, then build a retrieval-augmented generation (RAG) pipeline to answer questions, including citations from the source material.\n",
|
||||
"\n",
|
||||
"This tutorial will gloss over some concepts more deeply covered in our [RAG](/docs/tutorials/rag/) tutorial, so you may want to go through those first if you haven't already.\n",
|
||||
"\n",
|
||||
"Let's dive in!\n",
|
||||
"\n",
|
||||
"## Loading documents\n",
|
||||
"\n",
|
||||
"First, you'll need to choose a PDF to load. We'll use a document from [Nike's annual public SEC report](https://s1.q4cdn.com/806093406/files/doc_downloads/2023/414759-1-_5_Nike-NPS-Combo_Form-10-K_WR.pdf). It's over 100 pages long, and contains some crucial data mixed with longer explanatory text. However, you can feel free to use a PDF of your choosing.\n",
|
||||
"\n",
|
||||
"Once you've chosen your PDF, the next step is to load it into a format that an LLM can more easily handle, since LLMs generally require text inputs. LangChain has a few different [built-in document loaders](/docs/how_to/document_loader_pdf/) for this purpose which you can experiment with. Below, we'll use one powered by the [`pypdf`](https://pypi.org/project/pypdf/) package that reads from a filepath:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU pypdf langchain_community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"107\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import PyPDFLoader\n",
|
||||
"\n",
|
||||
"file_path = \"../example_data/nke-10k-2023.pdf\"\n",
|
||||
"loader = PyPDFLoader(file_path)\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"print(len(docs))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Table of Contents\n",
|
||||
"UNITED STATES\n",
|
||||
"SECURITIES AND EXCHANGE COMMISSION\n",
|
||||
"Washington, D.C. 20549\n",
|
||||
"FORM 10-K\n",
|
||||
"\n",
|
||||
"{'source': '../example_data/nke-10k-2023.pdf', 'page': 0}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(docs[0].page_content[0:100])\n",
|
||||
"print(docs[0].metadata)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"So what just happened?\n",
|
||||
"\n",
|
||||
"- The loader reads the PDF at the specified path into memory.\n",
|
||||
"- It then extracts text data using the `pypdf` package.\n",
|
||||
"- Finally, it creates a LangChain [Document](/docs/concepts/#documents) for each page of the PDF with the page's content and some metadata about where in the document the text came from.\n",
|
||||
"\n",
|
||||
"LangChain has [many other document loaders](/docs/integrations/document_loaders/) for other data sources, or you can create a [custom document loader](/docs/how_to/document_loader_custom/).\n",
|
||||
"\n",
|
||||
"## Question answering with RAG\n",
|
||||
"\n",
|
||||
"Next, you'll prepare the loaded documents for later retrieval. Using a [text splitter](/docs/concepts/#text-splitters), you'll split your loaded documents into smaller documents that can more easily fit into an LLM's context window, then load them into a [vector store](/docs/concepts/#vector-stores). You can then create a [retriever](/docs/concepts/#retrievers) from the vector store for use in our RAG chain:\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
|
||||
"\n",
|
||||
"<ChatModelTabs openaiParams={`model=\"gpt-4o\"`} />\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"%pip install langchain_anthropic\n",
|
||||
"\n",
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"\n",
|
||||
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass(\"Anthropic API Key:\")\n",
|
||||
"\n",
|
||||
"llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install langchain_chroma langchain_openai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
|
||||
"\n",
|
||||
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)\n",
|
||||
"splits = text_splitter.split_documents(docs)\n",
|
||||
"vectorstore = Chroma.from_documents(documents=splits, embedding=OpenAIEmbeddings())\n",
|
||||
"\n",
|
||||
"retriever = vectorstore.as_retriever()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Finally, you'll use some built-in helpers to construct the final `rag_chain`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'input': \"What was Nike's revenue in 2023?\",\n",
|
||||
" 'context': [Document(page_content='Table of Contents\\nFISCAL 2023 NIKE BRAND REVENUE HIGHLIGHTS\\nThe following tables present NIKE Brand revenues disaggregated by reportable operating segment, distribution channel and major product line:\\nFISCAL 2023 COMPARED TO FISCAL 2022\\n•NIKE, Inc. Revenues were $51.2 billion in fiscal 2023, which increased 10% and 16% compared to fiscal 2022 on a reported and currency-neutral basis, respectively.\\nThe increase was due to higher revenues in North America, Europe, Middle East & Africa (\"EMEA\"), APLA and Greater China, which contributed approximately 7, 6,\\n2 and 1 percentage points to NIKE, Inc. Revenues, respectively.\\n•NIKE Brand revenues, which represented over 90% of NIKE, Inc. Revenues, increased 10% and 16% on a reported and currency-neutral basis, respectively. This\\nincrease was primarily due to higher revenues in Men\\'s, the Jordan Brand, Women\\'s and Kids\\' which grew 17%, 35%,11% and 10%, respectively, on a wholesale\\nequivalent basis.', metadata={'page': 35, 'source': '../example_data/nke-10k-2023.pdf'}),\n",
|
||||
" Document(page_content='Enterprise Resource Planning Platform, data and analytics, demand sensing, insight gathering, and other areas to create an end-to-end technology foundation, which we\\nbelieve will further accelerate our digital transformation. We believe this unified approach will accelerate growth and unlock more efficiency for our business, while driving\\nspeed and responsiveness as we serve consumers globally.\\nFINANCIAL HIGHLIGHTS\\n•In fiscal 2023, NIKE, Inc. achieved record Revenues of $51.2 billion, which increased 10% and 16% on a reported and currency-neutral basis, respectively\\n•NIKE Direct revenues grew 14% from $18.7 billion in fiscal 2022 to $21.3 billion in fiscal 2023, and represented approximately 44% of total NIKE Brand revenues for\\nfiscal 2023\\n•Gross margin for the fiscal year decreased 250 basis points to 43.5% primarily driven by higher product costs, higher markdowns and unfavorable changes in foreign\\ncurrency exchange rates, partially offset by strategic pricing actions', metadata={'page': 30, 'source': '../example_data/nke-10k-2023.pdf'}),\n",
|
||||
" Document(page_content=\"Table of Contents\\nNORTH AMERICA\\n(Dollars in millions) FISCAL 2023FISCAL 2022 % CHANGE% CHANGE\\nEXCLUDING\\nCURRENCY\\nCHANGESFISCAL 2021 % CHANGE% CHANGE\\nEXCLUDING\\nCURRENCY\\nCHANGES\\nRevenues by:\\nFootwear $ 14,897 $ 12,228 22 % 22 %$ 11,644 5 % 5 %\\nApparel 5,947 5,492 8 % 9 % 5,028 9 % 9 %\\nEquipment 764 633 21 % 21 % 507 25 % 25 %\\nTOTAL REVENUES $ 21,608 $ 18,353 18 % 18 %$ 17,179 7 % 7 %\\nRevenues by: \\nSales to Wholesale Customers $ 11,273 $ 9,621 17 % 18 %$ 10,186 -6 % -6 %\\nSales through NIKE Direct 10,335 8,732 18 % 18 % 6,993 25 % 25 %\\nTOTAL REVENUES $ 21,608 $ 18,353 18 % 18 %$ 17,179 7 % 7 %\\nEARNINGS BEFORE INTEREST AND TAXES $ 5,454 $ 5,114 7 % $ 5,089 0 %\\nFISCAL 2023 COMPARED TO FISCAL 2022\\n•North America revenues increased 18% on a currency-neutral basis, primarily due to higher revenues in Men's and the Jordan Brand. NIKE Direct revenues\\nincreased 18%, driven by strong digital sales growth of 23%, comparable store sales growth of 9% and the addition of new stores.\", metadata={'page': 39, 'source': '../example_data/nke-10k-2023.pdf'}),\n",
|
||||
" Document(page_content=\"Table of Contents\\nEUROPE, MIDDLE EAST & AFRICA\\n(Dollars in millions) FISCAL 2023FISCAL 2022 % CHANGE% CHANGE\\nEXCLUDING\\nCURRENCY\\nCHANGESFISCAL 2021 % CHANGE% CHANGE\\nEXCLUDING\\nCURRENCY\\nCHANGES\\nRevenues by:\\nFootwear $ 8,260 $ 7,388 12 % 25 %$ 6,970 6 % 9 %\\nApparel 4,566 4,527 1 % 14 % 3,996 13 % 16 %\\nEquipment 592 564 5 % 18 % 490 15 % 17 %\\nTOTAL REVENUES $ 13,418 $ 12,479 8 % 21 %$ 11,456 9 % 12 %\\nRevenues by: \\nSales to Wholesale Customers $ 8,522 $ 8,377 2 % 15 %$ 7,812 7 % 10 %\\nSales through NIKE Direct 4,896 4,102 19 % 33 % 3,644 13 % 15 %\\nTOTAL REVENUES $ 13,418 $ 12,479 8 % 21 %$ 11,456 9 % 12 %\\nEARNINGS BEFORE INTEREST AND TAXES $ 3,531 $ 3,293 7 % $ 2,435 35 % \\nFISCAL 2023 COMPARED TO FISCAL 2022\\n•EMEA revenues increased 21% on a currency-neutral basis, due to higher revenues in Men's, the Jordan Brand, Women's and Kids'. NIKE Direct revenues\\nincreased 33%, driven primarily by strong digital sales growth of 43% and comparable store sales growth of 22%.\", metadata={'page': 40, 'source': '../example_data/nke-10k-2023.pdf'})],\n",
|
||||
" 'answer': 'According to the financial highlights, Nike, Inc. achieved record revenues of $51.2 billion in fiscal 2023, which increased 10% on a reported basis and 16% on a currency-neutral basis compared to fiscal 2022.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chains import create_retrieval_chain\n",
|
||||
"from langchain.chains.combine_documents import create_stuff_documents_chain\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"system_prompt = (\n",
|
||||
" \"You are an assistant for question-answering tasks. \"\n",
|
||||
" \"Use the following pieces of retrieved context to answer \"\n",
|
||||
" \"the question. If you don't know the answer, say that you \"\n",
|
||||
" \"don't know. Use three sentences maximum and keep the \"\n",
|
||||
" \"answer concise.\"\n",
|
||||
" \"\\n\\n\"\n",
|
||||
" \"{context}\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", system_prompt),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"question_answer_chain = create_stuff_documents_chain(llm, prompt)\n",
|
||||
"rag_chain = create_retrieval_chain(retriever, question_answer_chain)\n",
|
||||
"\n",
|
||||
"results = rag_chain.invoke({\"input\": \"What was Nike's revenue in 2023?\"})\n",
|
||||
"\n",
|
||||
"results"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can see that you get both a final answer in the `answer` key of the results dict, and the `context` the LLM used to generate an answer.\n",
|
||||
"\n",
|
||||
"Examining the values under the `context` further, you can see that they are documents that each contain a chunk of the ingested page content. Usefully, these documents also preserve the original metadata from way back when you first loaded them:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Table of Contents\n",
|
||||
"FISCAL 2023 NIKE BRAND REVENUE HIGHLIGHTS\n",
|
||||
"The following tables present NIKE Brand revenues disaggregated by reportable operating segment, distribution channel and major product line:\n",
|
||||
"FISCAL 2023 COMPARED TO FISCAL 2022\n",
|
||||
"•NIKE, Inc. Revenues were $51.2 billion in fiscal 2023, which increased 10% and 16% compared to fiscal 2022 on a reported and currency-neutral basis, respectively.\n",
|
||||
"The increase was due to higher revenues in North America, Europe, Middle East & Africa (\"EMEA\"), APLA and Greater China, which contributed approximately 7, 6,\n",
|
||||
"2 and 1 percentage points to NIKE, Inc. Revenues, respectively.\n",
|
||||
"•NIKE Brand revenues, which represented over 90% of NIKE, Inc. Revenues, increased 10% and 16% on a reported and currency-neutral basis, respectively. This\n",
|
||||
"increase was primarily due to higher revenues in Men's, the Jordan Brand, Women's and Kids' which grew 17%, 35%,11% and 10%, respectively, on a wholesale\n",
|
||||
"equivalent basis.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(results[\"context\"][0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'page': 35, 'source': '../example_data/nke-10k-2023.pdf'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(results[\"context\"][0].metadata)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This particular chunk came from page 35 in the original PDF. You can use this data to show which page in the PDF the answer came from, allowing users to quickly verify that answers are based on the source material.\n",
|
||||
"\n",
|
||||
":::info\n",
|
||||
"For a deeper dive into RAG, see [this more focused tutorial](/docs/tutorials/rag/) or [our how-to guides](/docs/how_to/#qa-with-rag).\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"## Next steps\n",
|
||||
"\n",
|
||||
"You've now seen how to load documents from a PDF file with a Document Loader and some techniques you can use to prepare that loaded data for RAG.\n",
|
||||
"\n",
|
||||
"For more on document loaders, you can check out:\n",
|
||||
"\n",
|
||||
"- [The entry in the conceptual guide](/docs/concepts/#document-loaders)\n",
|
||||
"- [Related how-to guides](/docs/how_to/#document-loaders)\n",
|
||||
"- [Available integrations](/docs/integrations/document_loaders/)\n",
|
||||
"- [How to create a custom document loader](/docs/how_to/document_loader_custom/)\n",
|
||||
"\n",
|
||||
"For more on RAG, see:\n",
|
||||
"\n",
|
||||
"- [Build a Retrieval Augmented Generation (RAG) App](/docs/tutorials/rag/)\n",
|
||||
"- [Related how-to guides](/docs/how_to/#qa-with-rag)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -45,11 +45,12 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 1,
|
||||
"id": "ede7fdc0-ef31-483d-bd67-32e4b5c5d527",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%capture --no-stderr\n",
|
||||
"%pip install --upgrade --quiet langchain langchain-community langchainhub langchain-chroma bs4"
|
||||
]
|
||||
},
|
||||
@@ -63,7 +64,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 2,
|
||||
"id": "143787ca-d8e6-4dc9-8281-4374f4d71720",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -71,7 +72,8 @@
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
|
||||
"if not os.environ.get(\"OPENAI_API_KEY\"):\n",
|
||||
" os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
|
||||
"\n",
|
||||
"# import dotenv\n",
|
||||
"\n",
|
||||
@@ -92,13 +94,14 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 3,
|
||||
"id": "07411adb-3722-4f65-ab7f-8f6f57663d11",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
|
||||
"os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()"
|
||||
"if not os.environ.get(\"LANGCHAIN_API_KEY\"):\n",
|
||||
" os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -125,7 +128,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 4,
|
||||
"id": "cb58f273-2111-4a9b-8932-9b64c95030c8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -140,8 +143,8 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "d8a913b1-0eea-442a-8a64-ec73333f104b",
|
||||
"execution_count": 6,
|
||||
"id": "820244ae-74b4-4593-b392-822979dd91b8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -151,20 +154,10 @@
|
||||
"from langchain.chains.combine_documents import create_stuff_documents_chain\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.document_loaders import WebBaseLoader\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "820244ae-74b4-4593-b392-822979dd91b8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
|
||||
"\n",
|
||||
"# 1. Load, chunk and index the contents of the blog to create a retriever.\n",
|
||||
"loader = WebBaseLoader(\n",
|
||||
" web_paths=(\"https://lilianweng.github.io/posts/2023-06-23-agent/\",),\n",
|
||||
@@ -206,17 +199,17 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 7,
|
||||
"id": "bf55faaf-0d17-4b74-925d-c478b555f7b2",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Task decomposition involves breaking down complex tasks into smaller and simpler steps to make them more manageable. This process can be achieved through techniques like Chain of Thought (CoT) or Tree of Thoughts, which help agents plan and execute tasks effectively by dividing them into sequential subgoals. Task decomposition can be facilitated by using prompting techniques, task-specific instructions, or human inputs to guide the agent through the steps required to accomplish a task.'"
|
||||
"\"Task decomposition involves breaking down complex tasks into smaller and simpler steps to make them more manageable for an agent or model. This process helps in guiding the agent through the various subgoals required to achieve the overall task efficiently. Different techniques like Chain of Thought and Tree of Thoughts can be used to decompose tasks into step-by-step processes, enhancing performance and understanding of the model's thinking process.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -278,7 +271,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 8,
|
||||
"id": "2b685428-8b82-4af1-be4f-7232c5d55b73",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -322,7 +315,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 9,
|
||||
"id": "66f275f3-ddef-4678-b90d-ee64576878f9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -354,7 +347,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 10,
|
||||
"id": "0005810b-1b95-4666-a795-08d80e478b83",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -362,7 +355,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Task decomposition can be done in several common ways, such as using Language Model (LLM) with simple prompting like \"Steps for XYZ\" or asking for subgoals to achieve a specific task. Task-specific instructions can also be provided, like requesting a story outline for writing a novel. Additionally, human inputs can be utilized to decompose tasks into smaller components effectively.\n"
|
||||
"Task decomposition can be achieved through various methods such as using techniques like Chain of Thought (CoT) or Tree of Thoughts to break down complex tasks into smaller steps. Common ways include prompting the model with simple instructions like \"Steps for XYZ\" or task-specific instructions like \"Write a story outline.\" Human inputs can also be used to guide the task decomposition process effectively.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -421,7 +414,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 11,
|
||||
"id": "9c3fb176-8d6a-4dc7-8408-6a22c5f7cc72",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -450,17 +443,17 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": 12,
|
||||
"id": "1046c92f-21b3-4214-907d-92878d8cba23",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Task decomposition involves breaking down complex tasks into smaller and simpler steps to make them more manageable for an agent or model. This process helps in guiding the agent through the various subgoals required to achieve the overall task efficiently. Different techniques like Chain of Thought and Tree of Thoughts can be used to decompose tasks into manageable components.'"
|
||||
"'Task decomposition involves breaking down complex tasks into smaller and simpler steps to make them more manageable. Techniques like Chain of Thought (CoT) and Tree of Thoughts help models decompose hard tasks into multiple manageable subtasks. This process allows agents to plan ahead and tackle intricate tasks effectively.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -476,17 +469,17 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": 13,
|
||||
"id": "0e89c75f-7ad7-4331-a2fe-57579eb8f840",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Task decomposition can be achieved through various methods such as using prompting techniques like \"Steps for XYZ\" to guide the model through subgoals, providing task-specific instructions like \"Write a story outline\" for specific tasks, or incorporating human inputs to break down complex tasks. These approaches help in dividing a large task into smaller, more manageable components for better understanding and execution.'"
|
||||
"'Task decomposition can be achieved through various methods such as using Language Model (LLM) with simple prompting, task-specific instructions tailored to the specific task at hand, or incorporating human inputs to break down the task into smaller components. These approaches help in guiding agents to think step by step and decompose complex tasks into more manageable subgoals.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -508,7 +501,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"execution_count": 14,
|
||||
"id": "7686b874-3a85-499f-82b5-28a85c4c768c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -518,11 +511,11 @@
|
||||
"text": [
|
||||
"User: What is Task Decomposition?\n",
|
||||
"\n",
|
||||
"AI: Task decomposition involves breaking down complex tasks into smaller and simpler steps to make them more manageable for an agent or model. This process helps in guiding the agent through the various subgoals required to achieve the overall task efficiently. Different techniques like Chain of Thought and Tree of Thoughts can be used to decompose tasks into manageable components.\n",
|
||||
"AI: Task decomposition involves breaking down complex tasks into smaller and simpler steps to make them more manageable. Techniques like Chain of Thought (CoT) and Tree of Thoughts help models decompose hard tasks into multiple manageable subtasks. This process allows agents to plan ahead and tackle intricate tasks effectively.\n",
|
||||
"\n",
|
||||
"User: What are common ways of doing it?\n",
|
||||
"\n",
|
||||
"AI: Task decomposition can be achieved through various methods such as using prompting techniques like \"Steps for XYZ\" to guide the model through subgoals, providing task-specific instructions like \"Write a story outline\" for specific tasks, or incorporating human inputs to break down complex tasks. These approaches help in dividing a large task into smaller, more manageable components for better understanding and execution.\n",
|
||||
"AI: Task decomposition can be achieved through various methods such as using Language Model (LLM) with simple prompting, task-specific instructions tailored to the specific task at hand, or incorporating human inputs to break down the task into smaller components. These approaches help in guiding agents to think step by step and decompose complex tasks into more manageable subgoals.\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
@@ -557,7 +550,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 15,
|
||||
"id": "71c32048-1a41-465f-a9e2-c4affc332fd9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -657,17 +650,17 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 16,
|
||||
"id": "6d0a7a73-d151-47d9-9e99-b4f3291c0322",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Task decomposition involves breaking down a complex task into smaller and simpler steps to make it more manageable. This process helps agents or models tackle difficult tasks by dividing them into more easily achievable subgoals. Task decomposition can be done through techniques like Chain of Thought or Tree of Thoughts, which guide the model in thinking step by step or exploring multiple reasoning possibilities at each step.'"
|
||||
"'Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. It involves transforming big tasks into multiple manageable tasks to facilitate problem-solving. Different methods like Chain of Thought and Tree of Thoughts can be employed to decompose tasks effectively.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -683,17 +676,17 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 17,
|
||||
"id": "17021822-896a-4513-a17d-1d20b1c5381c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"Common ways of task decomposition include using techniques like Chain of Thought (CoT) or Tree of Thoughts to guide models in breaking down complex tasks into smaller steps. This can be achieved through simple prompting with LLMs, task-specific instructions, or human inputs to help the model understand and navigate the task effectively. Task decomposition aims to enhance model performance on complex tasks by utilizing more test-time computation and shedding light on the model's thinking process.\""
|
||||
"'Task decomposition can be achieved through various methods such as using prompting techniques like \"Steps for XYZ\" or \"What are the subgoals for achieving XYZ?\", providing task-specific instructions like \"Write a story outline,\" or incorporating human inputs to break down complex tasks into smaller components. These approaches help in organizing thoughts and planning ahead for successful task completion.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -724,7 +717,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"execution_count": 18,
|
||||
"id": "809cc747-2135-40a2-8e73-e4556343ee64",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -749,17 +742,24 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"execution_count": 19,
|
||||
"id": "931c4fe3-c603-4efb-9b37-5f7cbbb1cbbd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Error in LangChainTracer.on_tool_end callback: TracerException(\"Found chain run at ID 0ec120e2-b1fc-4593-9fee-2dd4f4cae256, but expected {'tool'} run.\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Tree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\n(3) Task execution: Expert models execute on the specific tasks and log results.\\nInstruction:\\n\\nWith the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user\\'s request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.\\n\\nFig. 11. Illustration of how HuggingGPT works. (Image source: Shen et al. 2023)\\nThe system comprises of 4 stages:\\n(1) Task planning: LLM works as the brain and parses the user requests into multiple tasks. There are four attributes associated with each task: task type, ID, dependencies, and arguments. They use few-shot examples to guide LLM to do task parsing and planning.\\nInstruction:'"
|
||||
"'Tree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"execution_count": 19,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -781,14 +781,14 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"execution_count": 20,
|
||||
"id": "1726d151-4653-4c72-a187-a14840add526",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langgraph.prebuilt import chat_agent_executor\n",
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"agent_executor = chat_agent_executor.create_tool_calling_executor(llm, tools)"
|
||||
"agent_executor = create_react_agent(llm, tools)"
|
||||
]
|
||||
},
|
||||
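For reference, a minimal sketch of how the migrated executor is typically called, assuming `llm` is a tool-calling chat model and `tools` is the retriever-tool list built earlier in this notebook:

```python
from langchain_core.messages import HumanMessage
from langgraph.prebuilt import create_react_agent

# Prebuilt ReAct-style agent; replaces chat_agent_executor.create_tool_calling_executor
agent_executor = create_react_agent(llm, tools)

# The executor uses messages-based state: pass a list of messages in, read the last one out
response = agent_executor.invoke(
    {"messages": [HumanMessage(content="What is Task Decomposition?")]}
)
print(response["messages"][-1].content)
```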
{
|
||||
@@ -801,19 +801,26 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"execution_count": 21,
|
||||
"id": "170403a2-c914-41db-85d8-a2c381da112d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Error in LangChainTracer.on_tool_end callback: TracerException(\"Found chain run at ID 1a50f4da-34a7-44af-8cbb-c67c90c9619e, but expected {'tool'} run.\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_demTlnha4vYA1IH6CByYupBQ', 'function': {'arguments': '{\"query\":\"Task Decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 68, 'total_tokens': 87}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_3b956da36b', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-d1c3f3da-be18-46a5-b3a8-4621ba1f7f2a-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Task Decomposition'}, 'id': 'call_demTlnha4vYA1IH6CByYupBQ'}])]}}\n",
|
||||
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_1ZkTWsLYIlKZ1uMyIQGUuyJx', 'function': {'arguments': '{\"query\":\"Task Decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 68, 'total_tokens': 87}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-dddbe2d2-2355-4ca5-9961-1ceb39d78cf9-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Task Decomposition'}, 'id': 'call_1ZkTWsLYIlKZ1uMyIQGUuyJx'}])]}}\n",
|
||||
"----\n",
|
||||
"{'action': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\n(3) Task execution: Expert models execute on the specific tasks and log results.\\nInstruction:\\n\\nWith the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user\\'s request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.\\n\\nFig. 11. Illustration of how HuggingGPT works. (Image source: Shen et al. 2023)\\nThe system comprises of 4 stages:\\n(1) Task planning: LLM works as the brain and parses the user requests into multiple tasks. There are four attributes associated with each task: task type, ID, dependencies, and arguments. They use few-shot examples to guide LLM to do task parsing and planning.\\nInstruction:', name='blog_post_retriever', id='e83e4002-33d2-46ff-82f4-fddb3035fb6a', tool_call_id='call_demTlnha4vYA1IH6CByYupBQ')]}}\n",
|
||||
"{'tools': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.', name='blog_post_retriever', tool_call_id='call_1ZkTWsLYIlKZ1uMyIQGUuyJx')]}}\n",
|
||||
"----\n",
|
||||
"{'agent': {'messages': [AIMessage(content='Task decomposition is a technique used in autonomous agent systems to break down complex tasks into smaller and simpler steps. This approach helps agents better understand and plan for the various steps involved in completing a task. One common method for task decomposition is the Chain of Thought (CoT) technique, where models are prompted to \"think step by step\" to decompose hard tasks into manageable steps. Another approach, known as Tree of Thoughts, extends CoT by exploring multiple reasoning possibilities at each step and creating a tree structure of tasks.\\n\\nTask decomposition can be achieved through various methods, such as using simple prompts for language models, task-specific instructions, or human inputs. By breaking down tasks into smaller components, agents can effectively plan and execute tasks with greater efficiency.\\n\\nIn summary, task decomposition is a valuable strategy for autonomous agents to tackle complex tasks by breaking them down into smaller, more manageable steps.', response_metadata={'token_usage': {'completion_tokens': 177, 'prompt_tokens': 588, 'total_tokens': 765}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_3b956da36b', 'finish_reason': 'stop', 'logprobs': None}, id='run-808f32b9-ae61-4f31-a55a-f30643594282-0')]}}\n",
|
||||
"{'agent': {'messages': [AIMessage(content='Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. This approach helps in managing and solving difficult tasks by dividing them into more manageable components. One common method of task decomposition is the Chain of Thought (CoT) technique, where models are instructed to think step by step to decompose hard tasks into smaller steps. Another extension of CoT is the Tree of Thoughts, which explores multiple reasoning possibilities at each step and generates multiple thoughts per step, creating a tree structure. Task decomposition can be facilitated by using simple prompts, task-specific instructions, or human inputs.', response_metadata={'token_usage': {'completion_tokens': 119, 'prompt_tokens': 636, 'total_tokens': 755}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-4a701854-97f2-4ec2-b6e1-73410911fa72-0')]}}\n",
|
||||
"----\n"
|
||||
]
|
||||
}
|
||||
@@ -838,7 +845,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"execution_count": 22,
|
||||
"id": "04a3a664-3c3f-4cd1-9995-26662a52da7c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -847,9 +854,7 @@
|
||||
"\n",
|
||||
"memory = SqliteSaver.from_conn_string(\":memory:\")\n",
|
||||
"\n",
|
||||
"agent_executor = chat_agent_executor.create_tool_calling_executor(\n",
|
||||
" llm, tools, checkpointer=memory\n",
|
||||
")"
|
||||
"agent_executor = create_react_agent(llm, tools, checkpointer=memory)"
|
||||
]
|
||||
},
|
||||
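With a checkpointer attached, state is keyed by a thread ID, so follow-up turns in the same thread see earlier messages. A minimal sketch, assuming the `agent_executor` defined above; the `thread_id` value is illustrative:

```python
from langchain_core.messages import HumanMessage

# Any stable string identifies the conversation for the SqliteSaver
config = {"configurable": {"thread_id": "abc123"}}

agent_executor.invoke({"messages": [HumanMessage(content="Hi! I'm bob.")]}, config=config)
# Later calls with the same thread_id continue the same conversation
agent_executor.invoke(
    {"messages": [HumanMessage(content="What is Task Decomposition?")]}, config=config
)
```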
{
|
||||
@@ -864,7 +869,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"execution_count": 23,
|
||||
"id": "d6d70833-b958-4cd7-9e27-29c1c08bb1b8",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -872,7 +877,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'agent': {'messages': [AIMessage(content='Hello Bob! How can I assist you today?', response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 67, 'total_tokens': 78}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_3b956da36b', 'finish_reason': 'stop', 'logprobs': None}, id='run-1451e59b-b135-4776-985d-4759338ffee5-0')]}}\n",
|
||||
"{'agent': {'messages': [AIMessage(content='Hello Bob! How can I assist you today?', response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 67, 'total_tokens': 78}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-022806f0-eb26-4c87-9132-ed2fcc6c21ea-0')]}}\n",
|
||||
"----\n"
|
||||
]
|
||||
}
|
||||
@@ -897,7 +902,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"execution_count": 24,
|
||||
"id": "e2c570ae-dd91-402c-8693-ae746de63b16",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -905,11 +910,22 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_ab2x4iUPSWDAHS5txL7PspSK', 'function': {'arguments': '{\"query\":\"Task Decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 91, 'total_tokens': 110}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_3b956da36b', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-f76b5813-b41c-4d0d-9ed2-667b988d885e-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Task Decomposition'}, 'id': 'call_ab2x4iUPSWDAHS5txL7PspSK'}])]}}\n",
|
||||
"----\n",
|
||||
"{'action': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\n(3) Task execution: Expert models execute on the specific tasks and log results.\\nInstruction:\\n\\nWith the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user\\'s request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.\\n\\nFig. 11. Illustration of how HuggingGPT works. (Image source: Shen et al. 2023)\\nThe system comprises of 4 stages:\\n(1) Task planning: LLM works as the brain and parses the user requests into multiple tasks. There are four attributes associated with each task: task type, ID, dependencies, and arguments. They use few-shot examples to guide LLM to do task parsing and planning.\\nInstruction:', name='blog_post_retriever', id='e0895fa5-5d41-4be0-98db-10a83d42fc2f', tool_call_id='call_ab2x4iUPSWDAHS5txL7PspSK')]}}\n",
|
||||
"----\n",
|
||||
"{'agent': {'messages': [AIMessage(content='Task decomposition is a technique used in complex tasks where the task is broken down into smaller and simpler steps. This approach helps in managing and solving difficult tasks by dividing them into more manageable components. One common method for task decomposition is the Chain of Thought (CoT) technique, which prompts the model to think step by step and decompose hard tasks into smaller steps. Another extension of CoT is the Tree of Thoughts, which explores multiple reasoning possibilities at each step by creating a tree structure of thought steps.\\n\\nTask decomposition can be achieved through various methods, such as using language models with simple prompting, task-specific instructions, or human inputs. By breaking down tasks into smaller components, agents can better plan and execute complex tasks effectively.\\n\\nIf you would like more detailed information or examples related to task decomposition, feel free to ask!', response_metadata={'token_usage': {'completion_tokens': 165, 'prompt_tokens': 611, 'total_tokens': 776}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_3b956da36b', 'finish_reason': 'stop', 'logprobs': None}, id='run-13296566-8577-4d65-982b-a39718988ca3-0')]}}\n",
|
||||
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_DdAAJJgGIQOZQgKVE4duDyML', 'function': {'arguments': '{\"query\":\"Task Decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 91, 'total_tokens': 110}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-acc3c903-4f6f-48dd-8b36-f6f3b80d0856-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Task Decomposition'}, 'id': 'call_DdAAJJgGIQOZQgKVE4duDyML'}])]}}\n",
|
||||
"----\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Error in LangChainTracer.on_tool_end callback: TracerException(\"Found chain run at ID 9a7ba580-ec91-412d-9649-1b5cbf5ae7bc, but expected {'tool'} run.\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'tools': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.', name='blog_post_retriever', tool_call_id='call_DdAAJJgGIQOZQgKVE4duDyML')]}}\n",
|
||||
"----\n"
|
||||
]
|
||||
}
|
||||
@@ -936,7 +952,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 25,
|
||||
"execution_count": null,
|
||||
"id": "570d8c68-136e-4ba5-969a-03ba195f6118",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -983,7 +999,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"execution_count": null,
|
||||
"id": "b1d2b4d4-e604-497d-873d-d345b808578e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -1031,9 +1047,7 @@
|
||||
"tools = [tool]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"agent_executor = chat_agent_executor.create_tool_calling_executor(\n",
|
||||
" llm, tools, checkpointer=memory\n",
|
||||
")"
|
||||
"agent_executor = create_react_agent(llm, tools, checkpointer=memory)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -1080,7 +1094,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
"version": "3.11.2"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -104,7 +104,7 @@
|
||||
"```\n",
|
||||
"## Preview\n",
|
||||
"\n",
|
||||
"In this guide we’ll build a QA app over as website. The specific website we will use isthe [LLM Powered Autonomous\n",
|
||||
"In this guide we’ll build a QA app over as website. The specific website we will use is the [LLM Powered Autonomous\n",
|
||||
"Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) blog post\n",
|
||||
"by Lilian Weng, which allows us to ask questions about the contents of\n",
|
||||
"the post.\n",
|
||||
@@ -424,8 +424,7 @@
|
||||
"`TextSplitter`: Object that splits a list of `Document`s into smaller\n",
|
||||
"chunks. Subclass of `DocumentTransformer`s.\n",
|
||||
"\n",
|
||||
"- Explore [context-aware splitters](/docs/how_to#text-splitters), which keep the location (“context”) of each\n",
|
||||
" split in the original `Document`\n",
|
||||
"- Learn more about splitting text using different methods by reading the [how-to docs](/docs/how_to#text-splitters)\n",
|
||||
"- [Code (py or js)](/docs/integrations/document_loaders/source_code)\n",
|
||||
"- [Scientific papers](/docs/integrations/document_loaders/grobid)\n",
|
||||
"- [Interface](https://api.python.langchain.com/en/latest/base/langchain_text_splitters.base.TextSplitter.html): API reference for the base interface.\n",
|
||||
@@ -577,7 +576,7 @@
|
||||
" - `MultiQueryRetriever` [generates variants of the input\n",
|
||||
" question](/docs/how_to/MultiQueryRetriever)\n",
|
||||
" to improve retrieval hit rate.\n",
|
||||
" - `MultiVectorRetriever` (diagram below) instead generates\n",
|
||||
" - `MultiVectorRetriever` instead generates\n",
|
||||
" [variants of the\n",
|
||||
" embeddings](/docs/how_to/multi_vector),\n",
|
||||
" also in order to improve retrieval hit rate.\n",
|
||||
|
||||
@@ -36,6 +36,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%capture --no-stderr\n",
|
||||
"%pip install --upgrade --quiet langchain langchain-community langchain-openai"
|
||||
]
|
||||
},
|
||||
@@ -55,11 +56,13 @@
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
|
||||
"if not os.environ.get(\"OPENAI_API_KEY\"):\n",
|
||||
" os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
|
||||
"\n",
|
||||
"# Uncomment the below to use LangSmith. Not required.\n",
|
||||
"# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()\n",
|
||||
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\""
|
||||
"# Comment out the below to opt-out of using LangSmith in this notebook. Not required.\n",
|
||||
"if not os.environ.get(\"LANGCHAIN_API_KEY\"):\n",
|
||||
" os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()\n",
|
||||
" os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -467,6 +470,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%capture --no-stderr\n",
|
||||
"%pip install --upgrade --quiet langgraph"
|
||||
]
|
||||
},
|
||||
@@ -484,11 +488,9 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"from langgraph.prebuilt import chat_agent_executor\n",
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"agent_executor = chat_agent_executor.create_tool_calling_executor(\n",
|
||||
" llm, tools, messages_modifier=system_message\n",
|
||||
")"
|
||||
"agent_executor = create_react_agent(llm, tools, messages_modifier=system_message)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -725,9 +727,7 @@
|
||||
"\n",
|
||||
"system_message = SystemMessage(content=system)\n",
|
||||
"\n",
|
||||
"agent = chat_agent_executor.create_tool_calling_executor(\n",
|
||||
" llm, tools, messages_modifier=system_message\n",
|
||||
")"
|
||||
"agent = create_react_agent(llm, tools, messages_modifier=system_message)"
|
||||
]
|
||||
},
|
||||
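For completeness, a minimal sketch of wiring a system prompt into the prebuilt agent via `messages_modifier`, assuming `llm` and `tools` from the surrounding cells; the prompt text here is illustrative:

```python
from langchain_core.messages import SystemMessage
from langgraph.prebuilt import create_react_agent

# messages_modifier can be a SystemMessage (or a callable) applied before the model sees the state
system_message = SystemMessage(content="Answer only from the retrieved context.")
agent = create_react_agent(llm, tools, messages_modifier=system_message)
```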
{
|
||||
@@ -780,7 +780,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.1"
|
||||
"version": "3.10.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -558,51 +558,6 @@
|
||||
"It's also possible to supply a prompt and return intermediate steps."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"id": "cc931bde-8258-4d10-8479-f2d2d69f49f4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/Users/chestercurme/repos/langchain/libs/core/langchain_core/_api/deprecation.py:119: LangChainDeprecationWarning: The method `Chain.__call__` was deprecated in langchain 0.1.0 and will be removed in 0.2.0. Use invoke instead.\n",
|
||||
" warn_deprecated(\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"prompt_template = \"\"\"Write a concise summary of the following:\n",
|
||||
"{text}\n",
|
||||
"CONCISE SUMMARY:\"\"\"\n",
|
||||
"prompt = PromptTemplate.from_template(prompt_template)\n",
|
||||
"\n",
|
||||
"refine_template = (\n",
|
||||
" \"Your job is to produce a final summary\\n\"\n",
|
||||
" \"We have provided an existing summary up to a certain point: {existing_answer}\\n\"\n",
|
||||
" \"We have the opportunity to refine the existing summary\"\n",
|
||||
" \"(only if needed) with some more context below.\\n\"\n",
|
||||
" \"------------\\n\"\n",
|
||||
" \"{text}\\n\"\n",
|
||||
" \"------------\\n\"\n",
|
||||
" \"Given the new context, refine the original summary in Italian\"\n",
|
||||
" \"If the context isn't useful, return the original summary.\"\n",
|
||||
")\n",
|
||||
"refine_prompt = PromptTemplate.from_template(refine_template)\n",
|
||||
"chain = load_summarize_chain(\n",
|
||||
" llm=llm,\n",
|
||||
" chain_type=\"refine\",\n",
|
||||
" question_prompt=prompt,\n",
|
||||
" refine_prompt=refine_prompt,\n",
|
||||
" return_intermediate_steps=True,\n",
|
||||
" input_key=\"input_documents\",\n",
|
||||
" output_key=\"output_text\",\n",
|
||||
")\n",
|
||||
"result = chain({\"input_documents\": split_docs}, return_only_outputs=True)"
|
||||
]
|
||||
},
|
||||
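The stderr output above notes that `Chain.__call__` is deprecated in favor of `invoke`; a minimal sketch of the equivalent call, assuming the `chain` and `split_docs` defined in this cell:

```python
# Same refine chain, invoked without the deprecated Chain.__call__ entry point
result = chain.invoke({"input_documents": split_docs})
print(result["output_text"])
```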
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
---
|
||||
sidebar_position: 3
|
||||
sidebar_label: Changes
|
||||
keywords: [retrievalqa, llmchain, conversationalretrievalchain]
|
||||
---
|
||||
|
||||
# Deprecations and Breaking Changes
|
||||
@@ -499,7 +500,7 @@ Deprecated: 0.1.0
|
||||
Removal: 0.3.0
|
||||
|
||||
|
||||
Alternative: Use new agent constructor methods like create_react_agent, create_json_agent, create_structured_chat_agent, etc.
|
||||
Alternative: Use [LangGraph](/docs/how_to/migrate_agent/) or new agent constructor methods like create_react_agent, create_json_agent, create_structured_chat_agent, etc.
|
||||
|
||||
|
||||
#### Chain.__call__
|
||||
@@ -565,7 +566,7 @@ Deprecated: 0.1.17
|
||||
Removal: 0.3.0
|
||||
|
||||
|
||||
Alternative: RunnableSequence, e.g., `prompt | llm`
|
||||
Alternative: [RunnableSequence](/docs/how_to/sequence/), e.g., `prompt | llm`
|
||||
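As a point of reference, a minimal sketch of the `RunnableSequence` replacement for `LLMChain`, assuming an OpenAI chat model is available:

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

prompt = ChatPromptTemplate.from_template("Tell me a joke about {topic}")
llm = ChatOpenAI(model="gpt-3.5-turbo")

# Composing with | builds a RunnableSequence; invoke() replaces LLMChain.run()/__call__()
chain = prompt | llm | StrOutputParser()
chain.invoke({"topic": "bears"})
```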
|
||||
|
||||
#### LLMSingleActionAgent
|
||||
@@ -576,7 +577,7 @@ Deprecated: 0.1.0
|
||||
Removal: 0.3.0
|
||||
|
||||
|
||||
Alternative: Use new agent constructor methods like create_react_agent, create_json_agent, create_structured_chat_agent, etc.
|
||||
Alternative: Use [LangGraph](/docs/how_to/migrate_agent/) or new agent constructor methods like create_react_agent, create_json_agent, create_structured_chat_agent, etc.
|
||||
|
||||
|
||||
#### Agent
|
||||
@@ -587,7 +588,7 @@ Deprecated: 0.1.0
|
||||
Removal: 0.3.0
|
||||
|
||||
|
||||
Alternative: Use new agent constructor methods like create_react_agent, create_json_agent, create_structured_chat_agent, etc.
|
||||
Alternative: Use [LangGraph](/docs/how_to/migrate_agent/) or new agent constructor methods like create_react_agent, create_json_agent, create_structured_chat_agent, etc.
|
||||
|
||||
|
||||
#### OpenAIFunctionsAgent
|
||||
@@ -752,7 +753,7 @@ Deprecated: 0.1.17
|
||||
Removal: 0.3.0
|
||||
|
||||
|
||||
Alternative: create_retrieval_chain
|
||||
Alternative: [create_retrieval_chain](https://api.python.langchain.com/en/latest/chains/langchain.chains.retrieval.create_retrieval_chain.html#langchain-chains-retrieval-create-retrieval-chain)
|
||||
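A minimal sketch of the replacement pattern, assuming a `retriever` and chat `llm` are already constructed as in the RAG notebooks above; the prompt wording is illustrative:

```python
from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.prompts import ChatPromptTemplate

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "Answer the question using the following context:\n\n{context}"),
        ("human", "{input}"),
    ]
)

# Stuff retrieved documents into the prompt, then put the retriever in front of that chain
combine_docs_chain = create_stuff_documents_chain(llm, prompt)
rag_chain = create_retrieval_chain(retriever, combine_docs_chain)

rag_chain.invoke({"input": "What is Task Decomposition?"})
```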
|
||||
|
||||
#### load_agent_from_config
|
||||
@@ -785,7 +786,7 @@ Deprecated: 0.1.0
|
||||
Removal: 0.3.0
|
||||
|
||||
|
||||
Alternative: Use new agent constructor methods like create_react_agent, create_json_agent, create_structured_chat_agent, etc.
|
||||
Alternative: Use [LangGraph](/docs/how_to/migrate_agent/) or new agent constructor methods like create_react_agent, create_json_agent, create_structured_chat_agent, etc.
|
||||
|
||||
|
||||
#### XMLAgent
|
||||
@@ -818,7 +819,7 @@ Deprecated: 0.1.17
|
||||
Removal: 0.3.0
|
||||
|
||||
|
||||
Alternative: create_history_aware_retriever together with create_retrieval_chain (see example in docstring)
|
||||
Alternative: [create_history_aware_retriever](https://api.python.langchain.com/en/latest/chains/langchain.chains.history_aware_retriever.create_history_aware_retriever.html) together with [create_retrieval_chain](https://api.python.langchain.com/en/latest/chains/langchain.chains.retrieval.create_retrieval_chain.html#langchain-chains-retrieval-create-retrieval-chain) (see example in docstring)
|
||||
|
||||
|
||||
#### create_extraction_chain_pydantic
|
||||
@@ -829,7 +830,7 @@ Deprecated: 0.1.14
|
||||
Removal: 0.3.0
|
||||
|
||||
|
||||
Alternative: with_structured_output method on chat models that support tool calling.
|
||||
Alternative: [with_structured_output](/docs/how_to/structured_output/#the-with_structured_output-method) method on chat models that support tool calling.
|
||||
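Several of the entries below point at the same alternative; a minimal sketch of `with_structured_output`, assuming a tool-calling chat model, with an illustrative `Person` schema:

```python
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI


class Person(BaseModel):
    """Illustrative schema; replace with the fields to extract."""

    name: str = Field(description="The person's name")
    age: int = Field(description="The person's age")


llm = ChatOpenAI(model="gpt-3.5-turbo")
structured_llm = llm.with_structured_output(Person)

# Returns a Person instance rather than a raw AIMessage
structured_llm.invoke("Alan Turing was 41 when he died.")
```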
|
||||
|
||||
#### create_openai_fn_runnable
|
||||
@@ -840,7 +841,7 @@ Deprecated: 0.1.14
|
||||
Removal: 0.3.0
|
||||
|
||||
|
||||
Alternative: with_structured_output method on chat models that support tool calling.
|
||||
Alternative: [with_structured_output](/docs/how_to/structured_output/#the-with_structured_output-method) method on chat models that support tool calling.
|
||||
|
||||
|
||||
#### create_structured_output_runnable
|
||||
@@ -851,7 +852,7 @@ Deprecated: 0.1.17
|
||||
Removal: 0.3.0
|
||||
|
||||
|
||||
Alternative: with_structured_output method on chat models that support tool calling.
|
||||
Alternative: [with_structured_output](/docs/how_to/structured_output/#the-with_structured_output-method) method on chat models that support tool calling.
|
||||
|
||||
|
||||
#### create_openai_fn_chain
|
||||
@@ -883,7 +884,7 @@ Deprecated: 0.1.14
|
||||
Removal: 0.3.0
|
||||
|
||||
|
||||
Alternative: with_structured_output method on chat models that support tool calling.
|
||||
Alternative: [with_structured_output](/docs/how_to/structured_output/#the-with_structured_output-method) method on chat models that support tool calling.
|
||||
|
||||
|
||||
#### create_extraction_chain_pydantic
|
||||
@@ -894,4 +895,4 @@ Deprecated: 0.1.14
|
||||
Removal: 0.3.0
|
||||
|
||||
|
||||
Alternative: with_structured_output method on chat models that support tool calling.
|
||||
Alternative: [with_structured_output](/docs/how_to/structured_output/#the-with_structured_output-method) method on chat models that support tool calling.
|
||||
@@ -292,6 +292,10 @@ const config = {
|
||||
{
|
||||
title: "GitHub",
|
||||
items: [
|
||||
{
|
||||
label: "Organization",
|
||||
href: "https://github.com/langchain-ai",
|
||||
},
|
||||
{
|
||||
label: "Python",
|
||||
href: "https://github.com/langchain-ai/langchain",
|
||||
|
||||
@@ -418,9 +418,9 @@ def _compact_module_full_name(doc_path: str) -> str:
|
||||
module = doc_path.split("#")[1].replace("module-", "")
|
||||
if module.count(".") > 2:
|
||||
# langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI
|
||||
# -> langchain_community.llms...OCIModelDeploymentTGI
|
||||
# -> langchain_community...OCIModelDeploymentTGI
|
||||
module_parts = module.split(".")
|
||||
module = f"{module_parts[0]}.{module_parts[1]}...{module_parts[-1]}"
|
||||
module = f"{module_parts[0]}...{module_parts[-1]}"
|
||||
return module
|
||||
|
||||
|
||||
@@ -515,17 +515,22 @@ def log_results(arxiv_id2type2key2urls):
|
||||
def generate_arxiv_references_page(file_name: Path, papers: list[ArxivPaper]) -> None:
|
||||
with open(file_name, "w") as f:
|
||||
# Write the table headers
|
||||
f.write("""# arXiv
|
||||
f.write(
|
||||
"""# arXiv
|
||||
|
||||
LangChain implements the latest research in the field of Natural Language Processing.
|
||||
This page contains `arXiv` papers referenced in the LangChain Documentation, API Reference,
|
||||
Templates, and Cookbooks.
|
||||
|
||||
From the opposite direction, scientists use LangChain in research and reference LangChain in the research papers.
|
||||
Here you find [such papers](https://arxiv.org/search/?query=langchain&searchtype=all&source=header).
|
||||
|
||||
## Summary
|
||||
|
||||
| arXiv id / Title | Authors | Published date 🔻 | LangChain Documentation|
|
||||
|------------------|---------|-------------------|------------------------|
|
||||
""")
|
||||
"""
|
||||
)
|
||||
for paper in papers:
|
||||
refs = []
|
||||
if paper.referencing_doc2url:
|
||||
@@ -595,7 +600,8 @@ This page contains `arXiv` papers referenced in the LangChain Documentation, API
|
||||
if el
|
||||
]
|
||||
)
|
||||
f.write(f"""
|
||||
f.write(
|
||||
f"""
|
||||
## {paper.title}
|
||||
|
||||
- **arXiv id:** {paper.arxiv_id}
|
||||
@@ -608,7 +614,8 @@ This page contains `arXiv` papers referenced in the LangChain Documentation, API
|
||||
{refs}
|
||||
|
||||
**Abstract:** {paper.abstract}
|
||||
""")
|
||||
"""
|
||||
)
|
||||
|
||||
logger.warning(f"Created the {file_name} file with {len(papers)} arXiv references.")
|
||||
|
||||
|
||||
@@ -30,13 +30,18 @@ _DOCS_DIR = _CURRENT_PATH / "docs"
|
||||
def find_files(path):
|
||||
"""Find all MDX files in the given path"""
|
||||
# Check if is file first
|
||||
if ".ipynb_checkpoints" in str(path):
|
||||
return
|
||||
if os.path.isfile(path):
|
||||
yield path
|
||||
return
|
||||
for root, _, files in os.walk(path):
|
||||
for file in files:
|
||||
if file.endswith(".mdx") or file.endswith(".md"):
|
||||
yield os.path.join(root, file)
|
||||
full = os.path.join(root, file)
|
||||
if ".ipynb_checkpoints" in str(full):
|
||||
continue
|
||||
yield full
|
||||
|
||||
|
||||
def get_full_module_name(module_path, class_name):
|
||||
@@ -79,7 +84,7 @@ def main():
|
||||
.replace(".md", "/")
|
||||
)
|
||||
|
||||
doc_url = f"https://python.langchain.com/docs/{relative_path}"
|
||||
doc_url = f"https://python.langchain.com/v0.2/docs/{relative_path}"
|
||||
for import_info in file_imports:
|
||||
doc_title = import_info["title"]
|
||||
class_name = import_info["imported"]
|
||||
@@ -97,7 +102,7 @@ def main():
|
||||
|
||||
def _get_doc_title(data: str, file_name: str) -> str:
|
||||
try:
|
||||
return re.findall(r"^#\s+(.*)", data, re.MULTILINE)[0]
|
||||
return re.findall(r"^#\s*(.*)", data, re.MULTILINE)[0]
|
||||
except IndexError:
|
||||
pass
|
||||
# Parse the rst-style titles
|
||||
|
||||
@@ -62,6 +62,11 @@ CHAT_MODEL_FEAT_TABLE = {
|
||||
"package": "langchain-google-vertexai",
|
||||
"link": "/docs/integrations/chat/google_vertex_ai_palm/",
|
||||
},
|
||||
"ChatGoogleGenerativeAI": {
|
||||
"tool_calling": True,
|
||||
"package": "langchain-google-genai",
|
||||
"link": "/docs/integrations/chat/google_generative_ai/",
|
||||
},
|
||||
"ChatGroq": {
|
||||
"tool_calling": True,
|
||||
"structured_output": True,
|
||||
@@ -96,9 +101,21 @@ CHAT_MODEL_FEAT_TABLE = {
|
||||
"package": "langchain-community",
|
||||
"link": "/docs/integrations/chat/vllm/",
|
||||
},
|
||||
"ChatEdenAI": {
|
||||
"tool_calling": True,
|
||||
"structured_output": True,
|
||||
"package": "langchain-community",
|
||||
"link": "/docs/integrations/chat/edenai/",
|
||||
},
|
||||
"ChatLlamaCpp": {
|
||||
"tool_calling": True,
|
||||
"structured_output": True,
|
||||
"local": True,
|
||||
"package": "langchain-community",
|
||||
"link": "/docs/integrations/chat/llamacpp",
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
LLM_TEMPLATE = """\
|
||||
---
|
||||
sidebar_position: 1
|
||||
|
||||