Mirror of https://github.com/hwchase17/langchain.git (synced 2026-02-19 05:18:22 +00:00)

Compare commits: v0.0.210...harrison/m (101 commits)
@@ -0,0 +1,9 @@
# Caching

LangChain provides an optional caching layer for Chat Models. This is useful for two reasons:

It can save you money by reducing the number of API calls you make to the LLM provider, if you're often requesting the same completion multiple times.
It can speed up your application by reducing the number of API calls you make to the LLM provider.

import CachingChat from "@snippets/modules/model_io/models/chat/how_to/chat_model_caching.mdx"

<CachingChat/>
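As a quick orientation to what the imported snippet covers, here is a minimal sketch of turning on the in-memory cache for chat models; it assumes an OpenAI API key is already configured in your environment.

```python
import langchain
from langchain.cache import InMemoryCache
from langchain.chat_models import ChatOpenAI

# Enable the global in-memory cache; identical requests are served from memory.
langchain.llm_cache = InMemoryCache()

llm = ChatOpenAI()
llm.predict("Tell me a joke")  # first call goes to the provider
llm.predict("Tell me a joke")  # repeated call is answered from the cache
```

Other cache backends (for example `SQLiteCache`) follow the same pattern of assigning to `langchain.llm_cache`.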
File diff suppressed because it is too large

73  docs/extras/ecosystem/integrations/amazon_api_gateway.mdx (new file)
@@ -0,0 +1,73 @@
# Amazon API Gateway

[Amazon API Gateway](https://aws.amazon.com/api-gateway/) is a fully managed service that makes it easy for developers to create, publish, maintain, monitor, and secure APIs at any scale. APIs act as the "front door" for applications to access data, business logic, or functionality from your backend services. Using API Gateway, you can create RESTful APIs and WebSocket APIs that enable real-time two-way communication applications. API Gateway supports containerized and serverless workloads, as well as web applications.

API Gateway handles all the tasks involved in accepting and processing up to hundreds of thousands of concurrent API calls, including traffic management, CORS support, authorization and access control, throttling, monitoring, and API version management. API Gateway has no minimum fees or startup costs. You pay for the API calls you receive and the amount of data transferred out and, with the API Gateway tiered pricing model, you can reduce your cost as your API usage scales.

## LLM

See a [usage example](/docs/modules/model_io/models/llms/integrations/amazon_api_gateway_example.html).

```python
from langchain.llms import AmazonAPIGateway

api_url = "https://<api_gateway_id>.execute-api.<region>.amazonaws.com/LATEST/HF"
llm = AmazonAPIGateway(api_url=api_url)

# These are sample parameters for Falcon 40B Instruct deployed from Amazon SageMaker JumpStart
parameters = {
    "max_new_tokens": 100,
    "num_return_sequences": 1,
    "top_k": 50,
    "top_p": 0.95,
    "do_sample": False,
    "return_full_text": True,
    "temperature": 0.2,
}

prompt = "what day comes after Friday?"
llm.model_kwargs = parameters
llm(prompt)

>>> 'what day comes after Friday?\nSaturday'
```

## Agent

```python
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents import AgentType
from langchain.llms import AmazonAPIGateway

api_url = "https://<api_gateway_id>.execute-api.<region>.amazonaws.com/LATEST/HF"
llm = AmazonAPIGateway(api_url=api_url)

parameters = {
    "max_new_tokens": 50,
    "num_return_sequences": 1,
    "top_k": 250,
    "top_p": 0.25,
    "do_sample": False,
    "temperature": 0.1,
}

llm.model_kwargs = parameters

# Next, let's load some tools to use. Note that the `llm-math` tool uses an LLM, so we need to pass that in.
tools = load_tools(["python_repl", "llm-math"], llm=llm)

# Finally, let's initialize an agent with the tools, the language model, and the type of agent we want to use.
agent = initialize_agent(
    tools,
    llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
)

# Now let's test it out!
agent.run("""
Write a Python script that prints "Hello, world!"
""")

>>> 'Hello, world!'
```
23  docs/extras/ecosystem/integrations/hologres.mdx (new file)
@@ -0,0 +1,23 @@
# Hologres

>[Hologres](https://www.alibabacloud.com/help/en/hologres/latest/introduction) is a unified real-time data warehousing service developed by Alibaba Cloud. You can use Hologres to write, update, process, and analyze large amounts of data in real time.
>`Hologres` supports standard `SQL` syntax, is compatible with `PostgreSQL`, and supports most PostgreSQL functions. Hologres supports online analytical processing (OLAP) and ad hoc analysis of up to petabytes of data, and provides high-concurrency and low-latency online data services.

>`Hologres` provides **vector database** functionality by adopting [Proxima](https://www.alibabacloud.com/help/en/hologres/latest/vector-processing).
>`Proxima` is a high-performance software library developed by `Alibaba DAMO Academy`. It allows you to search for the nearest neighbors of vectors. Proxima provides higher stability and performance than similar open-source software such as Faiss. Proxima allows you to search for similar text or image embeddings with high throughput and low latency. Hologres is deeply integrated with Proxima to provide a high-performance vector search service.

## Installation and Setup

Click [here](https://www.alibabacloud.com/zh/product/hologres) to quickly deploy a Hologres cloud instance.

```bash
pip install psycopg2
```

## Vector Store

See a [usage example](/docs/modules/data_connection/vectorstores/integrations/hologres.html).

```python
from langchain.vectorstores import Hologres
```
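As a rough sketch of how the vector store is typically constructed (the connection string is a placeholder and the `connection_string` keyword is an assumption; the usage example linked above shows the exact arguments):

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Hologres

connection_string = "<your Hologres connection string>"  # placeholder

# Embed the texts and write them into a Hologres table backed by Proxima.
vectorstore = Hologres.from_texts(
    ["Hologres is compatible with PostgreSQL."],
    embedding=OpenAIEmbeddings(),
    connection_string=connection_string,  # assumed keyword argument
)

docs = vectorstore.similarity_search("What is Hologres compatible with?")
```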
31  docs/extras/ecosystem/integrations/marqo.md (new file)
@@ -0,0 +1,31 @@
# Marqo

This page covers how to use the Marqo ecosystem within LangChain.

### **What is Marqo?**

Marqo is a tensor search engine that uses embeddings stored in in-memory HNSW indexes to achieve cutting-edge search speeds. Marqo can scale to hundred-million document indexes with horizontal index sharding and allows for async and non-blocking data upload and search. Marqo uses the latest machine learning models from PyTorch, Hugging Face, OpenAI and more. You can start with a pre-configured model or bring your own. The built-in ONNX support and conversion allows for faster inference and higher throughput on both CPU and GPU.

Because Marqo includes its own inference, your documents can have a mix of text and images, and you can bring Marqo indexes populated with data from your other systems into the LangChain ecosystem without having to worry about your embeddings being compatible.

Deployment of Marqo is flexible: you can get started yourself with our Docker image or [contact us about our managed cloud offering!](https://www.marqo.ai/pricing)

To run Marqo locally with our Docker image, [see our getting started guide.](https://docs.marqo.ai/latest/)

## Installation and Setup
- Install the Python SDK with `pip install marqo`

## Wrappers

### VectorStore

There exists a wrapper around Marqo indexes, allowing you to use them within the vectorstore framework. Marqo lets you select from a range of models for generating embeddings and exposes some preprocessing configurations.

The Marqo vectorstore can also work with existing multimodal indexes where your documents have a mix of images and text; for more information refer to [our documentation](https://docs.marqo.ai/latest/#multi-modal-and-cross-modal-search). Note that instantiating the Marqo vectorstore with an existing multimodal index will disable the ability to add any new documents to it via the LangChain vectorstore `add_texts` method.

To import this vectorstore:
```python
from langchain.vectorstores import Marqo
```

For a more detailed walkthrough of the Marqo wrapper and some of its unique features, see [this notebook](../modules/indexes/vectorstores/examples/marqo.ipynb)
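As a rough sketch of how the wrapper fits together (the client URL and index name are placeholders, and the constructor arguments shown are assumptions; the notebook above has the authoritative walkthrough):

```python
import marqo
from langchain.vectorstores import Marqo

# Connect to a locally running Marqo instance (placeholder URL).
client = marqo.Client(url="http://localhost:8882")

# Wrap an index as a LangChain vectorstore; Marqo runs inference itself,
# so no separate embedding model is passed in.
vectorstore = Marqo(client, index_name="langchain-demo")
vectorstore.add_texts(["Marqo computes the embeddings for you."])

results = vectorstore.similarity_search("Who computes the embeddings?")
```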
@@ -25,7 +25,7 @@ There are two ways to set up parameters for myscale index.

1. Environment Variables

Before you run the app, please set the environment variable with `export`:
`export MYSCALE_URL='<your-endpoints-url>' MYSCALE_PORT=<your-endpoints-port> MYSCALE_USERNAME=<your-username> MYSCALE_PASSWORD=<your-password> ...`
`export MYSCALE_HOST='<your-endpoints-url>' MYSCALE_PORT=<your-endpoints-port> MYSCALE_USERNAME=<your-username> MYSCALE_PASSWORD=<your-password> ...`

You can easily find your account, password and other info on our SaaS. For details please refer to [this document](https://docs.myscale.com/en/cluster-management/).
Every attribute under `MyScaleSettings` can be set with the prefix `MYSCALE_` and is case insensitive.
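Equivalently, the same settings can be provided from Python before the vector store is created; the values below are placeholders, shown only to illustrate the environment-variable route.

```python
import os
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import MyScale

# Placeholder credentials; any MyScaleSettings attribute can be set via a MYSCALE_* variable.
os.environ["MYSCALE_HOST"] = "<your-endpoints-url>"
os.environ["MYSCALE_PORT"] = "<your-endpoints-port>"
os.environ["MYSCALE_USERNAME"] = "<your-username>"
os.environ["MYSCALE_PASSWORD"] = "<your-password>"

vectorstore = MyScale.from_texts(
    ["MyScale reads its connection settings from the MYSCALE_* environment variables."],
    OpenAIEmbeddings(),
)
```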
@@ -67,4 +67,4 @@ llm("What is the difference between a duck and a goose? And why there are so man

### Usage

For a more detailed walkthrough of the OpenLLM Wrapper, see the
[example notebook](../modules/models/llms/integrations/openllm.ipynb)
[example notebook](/docs/modules/model_io/models/llms/integrations/openllm.html)
19  docs/extras/ecosystem/integrations/rockset.mdx (new file)
@@ -0,0 +1,19 @@
# Rockset

>[Rockset](https://rockset.com/product/) is a real-time analytics database service for serving low-latency, high-concurrency analytical queries at scale. It builds a Converged Index™ on structured and semi-structured data with an efficient store for vector embeddings. Its support for running SQL on schemaless data makes it a perfect choice for running vector search with metadata filters.

## Installation and Setup

Make sure you have a Rockset account and go to the web console to get an API key. Details can be found on [the website](https://rockset.com/docs/rest-api/).

```bash
pip install rockset
```

## Vector Store

See a [usage example](/docs/modules/data_connection/vectorstores/integrations/rockset.html).

```python
from langchain.vectorstores import RocksetDB
```
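For orientation, the vector store sits on top of the Rockset Python client; the region host and API key below are placeholders, and the wrapper's own constructor arguments are covered in the usage example above.

```python
from rockset import RocksetClient, Regions

from langchain.vectorstores import RocksetDB

# Placeholder credentials; the API key comes from the Rockset web console.
rockset_client = RocksetClient(host=Regions.usw2a1, api_key="<your-api-key>")

# RocksetDB wraps this client together with an embedding model and a target
# collection; the linked usage example shows the exact constructor arguments.
```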
20  docs/extras/ecosystem/integrations/singlestoredb.mdx (new file)
@@ -0,0 +1,20 @@
# SingleStoreDB

>[SingleStoreDB](https://singlestore.com/) is a high-performance distributed SQL database that supports deployment both in the [cloud](https://www.singlestore.com/cloud/) and on-premises. It provides vector storage, and vector functions including [dot_product](https://docs.singlestore.com/managed-service/en/reference/sql-reference/vector-functions/dot_product.html) and [euclidean_distance](https://docs.singlestore.com/managed-service/en/reference/sql-reference/vector-functions/euclidean_distance.html), thereby supporting AI applications that require text similarity matching.

## Installation and Setup

There are several ways to establish a [connection](https://singlestoredb-python.labs.singlestore.com/generated/singlestoredb.connect.html) to the database. You can either set up environment variables or pass named parameters to the `SingleStoreDB` constructor.
Alternatively, you may provide these parameters to the `from_documents` and `from_texts` methods.

```bash
pip install singlestoredb
```

## Vector Store

See a [usage example](/docs/modules/data_connection/vectorstores/integrations/singlestoredb.html).

```python
from langchain.vectorstores import SingleStoreDB
```
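To make the connection options concrete, here is a minimal sketch; the `host` value is a placeholder and the keyword name is an assumption based on the connection documentation linked above.

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import SingleStoreDB

# Placeholder connection string in "user:password@host:port/database" form.
vectorstore = SingleStoreDB.from_texts(
    ["SingleStoreDB exposes dot_product and euclidean_distance for similarity search."],
    OpenAIEmbeddings(),
    host="admin:password@localhost:3306/demo",  # assumed keyword; env vars work too
)

docs = vectorstore.similarity_search("Which vector functions are available?")
```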
@@ -1,15 +1,14 @@
# scikit-learn

This page covers how to use the scikit-learn package within LangChain.
It is broken into two parts: installation and setup, and then references to specific scikit-learn wrappers.
>[scikit-learn](https://scikit-learn.org/stable/) is an open-source collection of machine learning algorithms,
> including an implementation of [k-nearest neighbors](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.NearestNeighbors.html). `SKLearnVectorStore` wraps this implementation and adds the option to persist the vector store in JSON, BSON (binary JSON), or Apache Parquet format.

## Installation and Setup

- Install the Python package with `pip install scikit-learn`

## Wrappers

### VectorStore
## Vector Store

`SKLearnVectorStore` provides a simple wrapper around the nearest-neighbor implementation in the
scikit-learn package, allowing you to use it as a vectorstore.
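A minimal sketch of the persistence option described above; the path is a placeholder and the `persist_path`/`serializer` argument names are assumptions mirroring the formats mentioned (JSON, BSON, Parquet), so check the integration notebook for the exact API.

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import SKLearnVectorStore

vectorstore = SKLearnVectorStore.from_texts(
    ["scikit-learn's NearestNeighbors powers this vector store."],
    OpenAIEmbeddings(),
    persist_path="/tmp/sklearn_vectorstore.parquet",  # placeholder path
    serializer="parquet",  # assumed values: "json", "bson", or "parquet"
)

vectorstore.persist()  # write the index to disk so it can be reloaded later
docs = vectorstore.similarity_search("What powers this store?")
```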
21  docs/extras/ecosystem/integrations/starrocks.mdx (new file)
@@ -0,0 +1,21 @@
# StarRocks

>[StarRocks](https://www.starrocks.io/) is a high-performance analytical database.
`StarRocks` is a next-gen, sub-second MPP database for full analytics scenarios, including multi-dimensional analytics, real-time analytics, and ad-hoc queries.

>Usually `StarRocks` is categorized as OLAP, and it has shown excellent performance in [ClickBench — a Benchmark For Analytical DBMS](https://benchmark.clickhouse.com/). Since it has a super-fast vectorized execution engine, it can also be used as a fast vector database.

## Installation and Setup

```bash
pip install pymysql
```

## Vector Store

See a [usage example](/docs/modules/data_connection/vectorstores/integrations/starrocks.html).

```python
from langchain.vectorstores import StarRocks
```
19  docs/extras/ecosystem/integrations/tigris.mdx (new file)
@@ -0,0 +1,19 @@
# Tigris

> [Tigris](https://tigrisdata.com) is an open-source serverless NoSQL database and search platform designed to simplify building high-performance vector search applications.
> `Tigris` eliminates the infrastructure complexity of managing, operating, and synchronizing multiple tools, allowing you to focus on building great applications instead.

## Installation and Setup

```bash
pip install tigrisdb openapi-schema-pydantic openai tiktoken
```

## Vector Store

See a [usage example](/docs/modules/data_connection/vectorstores/integrations/tigris.html).

```python
from langchain.vectorstores import Tigris
```
22  docs/extras/ecosystem/integrations/typesense.mdx (new file)
@@ -0,0 +1,22 @@
# Typesense

> [Typesense](https://typesense.org) is an open-source, in-memory search engine that you can either
> [self-host](https://typesense.org/docs/guide/install-typesense.html#option-2-local-machine-self-hosting) or run
> on [Typesense Cloud](https://cloud.typesense.org/).
> `Typesense` focuses on performance by storing the entire index in RAM (with a backup on disk) and also
> focuses on providing an out-of-the-box developer experience by simplifying available options and setting good defaults.

## Installation and Setup

```bash
pip install typesense openapi-schema-pydantic openai tiktoken
```

## Vector Store

See a [usage example](/docs/modules/data_connection/vectorstores/integrations/typesense.html).

```python
from langchain.vectorstores import Typesense
```
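For a sense of how the wrapper is wired up, here is a minimal sketch; the client parameters and collection name are placeholders and the `typesense_client_params` keyword is an assumption, so consult the usage example above for the exact arguments.

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Typesense

vectorstore = Typesense.from_texts(
    ["Typesense keeps its whole index in RAM with a backup on disk."],
    OpenAIEmbeddings(),
    typesense_client_params={  # assumed keyword; values are placeholders
        "host": "localhost",
        "port": "8108",
        "protocol": "http",
        "typesense_api_key": "xyz",
        "typesense_collection_name": "langchain-demo",
    },
)

docs = vectorstore.similarity_search("Where does Typesense store its index?")
```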
@@ -23,11 +23,15 @@ its dependencies running locally.

If you want to get up and running with less set up, you can
simply run `pip install unstructured` and use `UnstructuredAPIFileLoader` or
`UnstructuredAPIFileIOLoader`. That will process your document using the hosted Unstructured API.
Note that currently (as of 1 May 2023) the Unstructured API is open, but it will soon require
an API key. The [Unstructured documentation page](https://unstructured-io.github.io/) will have
instructions on how to generate an API key once they're available. Check out the instructions
[here](https://github.com/Unstructured-IO/unstructured-api#dizzy-instructions-for-using-the-docker-image)
if you'd like to self-host the Unstructured API or run it locally.

The Unstructured API requires API keys to make requests.
You can generate a free API key [here](https://www.unstructured.io/api-key) and start using it today!
Check out the README [here](https://github.com/Unstructured-IO/unstructured-api) to get started making API calls.
We'd love to hear your feedback, let us know how it goes in our [community slack](https://join.slack.com/t/unstructuredw-kbe4326/shared_invite/zt-1x7cgo0pg-PTptXWylzPQF9xZolzCnwQ).
And stay tuned for improvements to both quality and performance!
Check out the instructions
[here](https://github.com/Unstructured-IO/unstructured-api#dizzy-instructions-for-using-the-docker-image) if you'd like to self-host the Unstructured API or run it locally.
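As a brief illustration of the hosted-API path described above (the file name is a placeholder and the `api_key` keyword is an assumption; check the loader reference for the exact parameter name):

```python
from langchain.document_loaders import UnstructuredAPIFileLoader

# Parse a local file with the hosted Unstructured API (placeholder file and key).
loader = UnstructuredAPIFileLoader(
    "example_data/report.pdf",
    api_key="<your-unstructured-api-key>",  # assumed keyword
)
docs = loader.load()
```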
## Wrappers
448  docs/extras/guides/evaluation/comparisons.ipynb (new file)
@@ -0,0 +1,448 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Comparing Chain Outputs\n",
|
||||
"\n",
|
||||
"Suppose you have two different prompts (or LLMs). How do you know which will generate \"better\" results?\n",
|
||||
"\n",
|
||||
"One automated way to predict the preferred configuration is to use a `PairwiseStringEvaluator` like the `PairwiseStringEvalChain`<a name=\"cite_ref-1\"></a>[<sup>[1]</sup>](#cite_note-1). This chain prompts an LLM to select which output is preferred, given a specific input.\n",
|
||||
"\n",
|
||||
"For this evalution, we will need 3 things:\n",
|
||||
"1. An evaluator\n",
|
||||
"2. A dataset of inputs\n",
|
||||
"3. 2 (or more) LLMs, Chains, or Agents to compare\n",
|
||||
"\n",
|
||||
"Then we will aggregate the restults to determine the preferred model.\n",
|
||||
"\n",
|
||||
"### Step 1. Create the Evaluator\n",
|
||||
"\n",
|
||||
"In this example, you will use gpt-4 to select which output is preferred."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Optional if you are tracing the notebook\n",
|
||||
"%env LANGCHAIN_PROJECT=\"Comparing Chain Outputs\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.evaluation.comparison import PairwiseStringEvalChain\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-4\")\n",
|
||||
"\n",
|
||||
"eval_chain = PairwiseStringEvalChain.from_llm(llm=llm)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Step 2. Select Dataset\n",
|
||||
"\n",
|
||||
"If you already have real usage data for your LLM, you can use a representative sample. More examples\n",
|
||||
"provide more reliable results. We will use some example queries someone might have about how to use langchain here."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Found cached dataset parquet (/Users/wfh/.cache/huggingface/datasets/LangChainDatasets___parquet/LangChainDatasets--langchain-howto-queries-bbb748bbee7e77aa/0.0.0/2a3b91fbd88a2c90d1dbbb32b460cf621d31bd5b05b934492fdef7d8d6f236ec)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "d852a1884480457292c90d8bd9d4f1e6",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
" 0%| | 0/1 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.evaluation.loading import load_dataset\n",
|
||||
"\n",
|
||||
"dataset = load_dataset(\"langchain-howto-queries\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Step 3. Define Models to Compare\n",
|
||||
"\n",
|
||||
"We will be comparing two agents in this case."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain import SerpAPIWrapper\n",
|
||||
"from langchain.agents import initialize_agent, Tool\n",
|
||||
"from langchain.agents import AgentType\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Initialize the language model\n",
|
||||
"# You can add your own OpenAI API key by adding openai_api_key=\"<your_api_key>\" \n",
|
||||
"llm = ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo-0613\")\n",
|
||||
"\n",
|
||||
"# Initialize the SerpAPIWrapper for search functionality\n",
|
||||
"#Replace <your_api_key> in openai_api_key=\"<your_api_key>\" with your actual SerpAPI key.\n",
|
||||
"search = SerpAPIWrapper()\n",
|
||||
"\n",
|
||||
"# Define a list of tools offered by the agent\n",
|
||||
"tools = [\n",
|
||||
" Tool(\n",
|
||||
" name=\"Search\",\n",
|
||||
" func=search.run,\n",
|
||||
" coroutine=search.arun,\n",
|
||||
" description=\"Useful when you need to answer questions about current events. You should ask targeted questions.\"\n",
|
||||
" ),\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"functions_agent = initialize_agent(tools, llm, agent=AgentType.OPENAI_MULTI_FUNCTIONS, verbose=False)\n",
|
||||
"conversations_agent = initialize_agent(tools, llm, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Step 4. Generate Responses\n",
|
||||
"\n",
|
||||
"We will generate outputs for each of the models before evaluating them."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "b076d6bf6680422aa9082d4bad4d98a3",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
" 0%| | 0/20 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Retrying langchain.chat_models.openai.acompletion_with_retry.<locals>._completion_with_retry in 1.0 seconds as it raised ServiceUnavailableError: The server is overloaded or not ready yet..\n",
|
||||
"Retrying langchain.chat_models.openai.acompletion_with_retry.<locals>._completion_with_retry in 1.0 seconds as it raised ServiceUnavailableError: The server is overloaded or not ready yet..\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from tqdm.notebook import tqdm\n",
|
||||
"import asyncio\n",
|
||||
"\n",
|
||||
"results = []\n",
|
||||
"agents = [functions_agent, conversations_agent]\n",
|
||||
"concurrency_level = 6 # How many concurrent agents to run. May need to decrease if OpenAI is rate limiting.\n",
|
||||
"\n",
|
||||
"# We will only run the first 20 examples of this dataset to speed things up\n",
|
||||
"# This will lead to larger confidence intervals downstream.\n",
|
||||
"batch = []\n",
|
||||
"for example in tqdm(dataset[:20]):\n",
|
||||
" batch.extend([agent.acall(example['inputs']) for agent in agents])\n",
|
||||
" if len(batch) >= concurrency_level:\n",
|
||||
" batch_results = await asyncio.gather(*batch, return_exceptions=True)\n",
|
||||
" results.extend(list(zip(*[iter(batch_results)]*2)))\n",
|
||||
" batch = []\n",
|
||||
"if batch:\n",
|
||||
" batch_results = await asyncio.gather(*batch, return_exceptions=True)\n",
|
||||
" results.extend(list(zip(*[iter(batch_results)]*2)))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Step 5. Evaluate Pairs\n",
|
||||
"\n",
|
||||
"Now it's time to evaluate the results. For each agent response, run the evaluation chain to select which output is preferred (or return a tie).\n",
|
||||
"\n",
|
||||
"Randomly select the input order to reduce the likelihood that one model will be preferred just because it is presented first."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import random\n",
|
||||
"\n",
|
||||
"def predict_preferences(dataset, results) -> list:\n",
|
||||
" preferences = []\n",
|
||||
"\n",
|
||||
" for example, (res_a, res_b) in zip(dataset, results):\n",
|
||||
" input_ = example['inputs']\n",
|
||||
" # Flip a coin to reduce persistent position bias\n",
|
||||
" if random.random() < 0.5:\n",
|
||||
" pred_a, pred_b = res_a, res_b\n",
|
||||
" a, b = \"a\", \"b\"\n",
|
||||
" else:\n",
|
||||
" pred_a, pred_b = res_b, res_a\n",
|
||||
" a, b = \"b\", \"a\"\n",
|
||||
" eval_res = eval_chain.evaluate_string_pairs(\n",
|
||||
" output_a=pred_a['output'] if isinstance(pred_a, dict) else str(pred_a),\n",
|
||||
" output_b=pred_b['output'] if isinstance(pred_b, dict) else str(pred_b),\n",
|
||||
" input=input_\n",
|
||||
" )\n",
|
||||
" if eval_res[\"value\"] == \"A\":\n",
|
||||
" preferences.append(a)\n",
|
||||
" elif eval_res[\"value\"] == \"B\":\n",
|
||||
" preferences.append(b)\n",
|
||||
" else:\n",
|
||||
" preferences.append(None) # No preference\n",
|
||||
" return preferences"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"preferences = predict_preferences(dataset, results)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"**Print out the ratio of preferences.**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"OpenAI Functions Agent: 90.00%\n",
|
||||
"Structured Chat Agent: 10.00%\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from collections import Counter\n",
|
||||
"\n",
|
||||
"name_map = {\n",
|
||||
" \"a\": \"OpenAI Functions Agent\",\n",
|
||||
" \"b\": \"Structured Chat Agent\",\n",
|
||||
"}\n",
|
||||
"counts = Counter(preferences)\n",
|
||||
"pref_ratios = {\n",
|
||||
" k: v/len(preferences) for k, v in\n",
|
||||
" counts.items()\n",
|
||||
"}\n",
|
||||
"for k, v in pref_ratios.items():\n",
|
||||
" print(f\"{name_map.get(k)}: {v:.2%}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Estimate Confidence Intervals\n",
|
||||
"\n",
|
||||
"The results seem pretty clear, but if you want to have a better sense of how confident we are, that model \"A\" (the OpenAI Functions Agent) is the preferred model, we can calculate confidence intervals. \n",
|
||||
"\n",
|
||||
"Below, use the Wilson score to estimate the confidence interval."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from math import sqrt\n",
|
||||
"\n",
|
||||
"def wilson_score_interval(preferences: list, which: str = \"a\", z: float = 1.96) -> tuple:\n",
|
||||
" \"\"\"Estimate the confidence interval using the Wilson score.\n",
|
||||
" \n",
|
||||
" See: https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval#Wilson_score_interval\n",
|
||||
" for more details, including when to use it and when it should not be used.\n",
|
||||
" \"\"\"\n",
|
||||
" total_preferences = preferences.count('a') + preferences.count('b')\n",
|
||||
" n_s = preferences.count(which)\n",
|
||||
"\n",
|
||||
" if total_preferences == 0:\n",
|
||||
" return (0, 0)\n",
|
||||
"\n",
|
||||
" p_hat = n_s / total_preferences\n",
|
||||
"\n",
|
||||
" denominator = 1 + (z**2) / total_preferences\n",
|
||||
" adjustment = (z / denominator) * sqrt(p_hat*(1-p_hat)/total_preferences + (z**2)/(4*total_preferences*total_preferences))\n",
|
||||
" center = (p_hat + (z**2) / (2*total_preferences)) / denominator\n",
|
||||
" lower_bound = min(max(center - adjustment, 0.0), 1.0)\n",
|
||||
" upper_bound = min(max(center + adjustment, 0.0), 1.0)\n",
|
||||
"\n",
|
||||
" return (lower_bound, upper_bound)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"The \"OpenAI Functions Agent\" would be preferred between 69.90% and 97.21% percent of the time (with 95% confidence).\n",
|
||||
"The \"Structured Chat Agent\" would be preferred between 2.79% and 30.10% percent of the time (with 95% confidence).\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for which_, name in name_map.items():\n",
|
||||
" low, high = wilson_score_interval(preferences, which=which_)\n",
|
||||
" print(f'The \"{name}\" would be preferred between {low:.2%} and {high:.2%} percent of the time (with 95% confidence).')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Print out the p-value.**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"The p-value is 0.00040. If the null hypothesis is true (i.e., if the selected eval chain actually has no preference between the models),\n",
|
||||
"then there is a 0.04025% chance of observing the OpenAI Functions Agent be preferred at least 18\n",
|
||||
"times out of 20 trials.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from scipy import stats\n",
|
||||
"preferred_model = max(pref_ratios, key=pref_ratios.get)\n",
|
||||
"successes = preferences.count(preferred_model)\n",
|
||||
"n = len(preferences) - preferences.count(None)\n",
|
||||
"p_value = stats.binom_test(successes, n, p=0.5, alternative='two-sided')\n",
|
||||
"print(f\"\"\"The p-value is {p_value:.5f}. If the null hypothesis is true (i.e., if the selected eval chain actually has no preference between the models),\n",
|
||||
"then there is a {p_value:.5%} chance of observing the {name_map.get(preferred_model)} be preferred at least {successes}\n",
|
||||
"times out of {n} trials.\"\"\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a name=\"cite_note-1\"></a>_1. Note: Automated evals are still an open research topic and are best used alongside other evaluation approaches. \n",
|
||||
"LLM preferences exhibit biases, including banal ones like the order of outputs.\n",
|
||||
"In choosing preferences, \"ground truth\" may not be taken into account, which may lead to scores that aren't grounded in utility._"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
400  docs/extras/guides/evaluation/criteria_eval_chain.ipynb (new file)
@@ -0,0 +1,400 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4cf569a7-9a1d-4489-934e-50e57760c907",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Evaluating Custom Criteria\n",
|
||||
"\n",
|
||||
"Suppose you want to test a model's output against a custom rubric or custom set of criteria, how would you go about testing this?\n",
|
||||
"\n",
|
||||
"The `CriteriaEvalChain` is a convenient way to predict whether an LLM or Chain's output complies with a set of criteria, so long as you can\n",
|
||||
"describe those criteria in regular language. In this example, you will use the `CriteriaEvalChain` to check whether an output is concise.\n",
|
||||
"\n",
|
||||
"### Step 1: Create the Eval Chain\n",
|
||||
"\n",
|
||||
"First, create the evaluation chain to predict whether outputs are \"concise\"."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "6005ebe8-551e-47a5-b4df-80575a068552",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.evaluation.criteria import CriteriaEvalChain\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(temperature=0)\n",
|
||||
"criterion = \"conciseness\"\n",
|
||||
"eval_chain = CriteriaEvalChain.from_llm(llm=llm, criteria=criterion)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "eaef0d93-e080-4be2-a0f1-701b0d91fcf4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Step 2: Make Prediction\n",
|
||||
"\n",
|
||||
"Run an output to measure."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "68b1a348-cf41-40bf-9667-e79683464cf2",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = ChatOpenAI(temperature=0)\n",
|
||||
"query=\"What's the origin of the term synecdoche?\"\n",
|
||||
"prediction = llm.predict(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f45ed40e-09c4-44dc-813d-63a4ffb2d2ea",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Step 3: Evaluate Prediction\n",
|
||||
"\n",
|
||||
"Determine whether the prediciton conforms to the criteria."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "22f83fb8-82f4-4310-a877-68aaa0789199",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'reasoning': '1. Conciseness: The submission is concise and to the point. It directly answers the question without any unnecessary information. Therefore, the submission meets the criterion of conciseness.\\n\\nY', 'value': 'Y', 'score': 1}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"eval_result = eval_chain.evaluate_strings(prediction=prediction, input=query)\n",
|
||||
"print(eval_result)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "8c4ec9dd-6557-4f23-8480-c822eb6ec552",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"['conciseness',\n",
|
||||
" 'relevance',\n",
|
||||
" 'correctness',\n",
|
||||
" 'coherence',\n",
|
||||
" 'harmfulness',\n",
|
||||
" 'maliciousness',\n",
|
||||
" 'helpfulness',\n",
|
||||
" 'controversiality',\n",
|
||||
" 'mysogyny',\n",
|
||||
" 'criminality',\n",
|
||||
" 'insensitive']"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# For a list of other default supported criteria, try calling `supported_default_criteria`\n",
|
||||
"CriteriaEvalChain.get_supported_default_criteria()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c40b1ac7-8f95-48ed-89a2-623bcc746461",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Requiring Reference Labels\n",
|
||||
"\n",
|
||||
"Some criteria may be useful only when there are ground truth reference labels. You can pass these in as well."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "20d8a86b-beba-42ce-b82c-d9e5ebc13686",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"With ground truth: 1\n",
|
||||
"Withoutg ground truth: 0\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"eval_chain = CriteriaEvalChain.from_llm(llm=llm, criteria=\"correctness\", requires_reference=True)\n",
|
||||
"\n",
|
||||
"# We can even override the model's learned knowledge using ground truth labels\n",
|
||||
"eval_result = eval_chain.evaluate_strings(\n",
|
||||
" input=\"What is the capital of the US?\",\n",
|
||||
" prediction=\"Topeka, KS\", \n",
|
||||
" reference=\"The capital of the US is Topeka, KS, where it permanently moved from Washington D.C. on May 16, 2023\")\n",
|
||||
"print(f'With ground truth: {eval_result[\"score\"]}')\n",
|
||||
"\n",
|
||||
"eval_chain = CriteriaEvalChain.from_llm(llm=llm, criteria=\"correctness\")\n",
|
||||
"eval_result = eval_chain.evaluate_strings(\n",
|
||||
" input=\"What is the capital of the US?\",\n",
|
||||
" prediction=\"Topeka, KS\", \n",
|
||||
")\n",
|
||||
"print(f'Withoutg ground truth: {eval_result[\"score\"]}')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2eb7dedb-913a-4d9e-b48a-9521425d1008",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"## Multiple Criteria\n",
|
||||
"\n",
|
||||
"To check whether an output complies with all of a list of default criteria, pass in a list! Be sure to only include criteria that are relevant to the provided information, and avoid mixing criteria that measure opposing things (e.g., harmfulness and helpfulness)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "50c067f7-bc6e-4d6c-ba34-97a72023be27",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'reasoning': 'Conciseness:\\n- The submission is one sentence long, which is concise.\\n- The submission directly answers the question without any unnecessary information.\\nConclusion: The submission meets the conciseness criterion.\\n\\nCoherence:\\n- The submission is well-structured and organized.\\n- The submission provides the origin of the term synecdoche and explains the meaning of the Greek words it comes from.\\n- The submission is coherent and easy to understand.\\nConclusion: The submission meets the coherence criterion.', 'value': 'Final conclusion: Y', 'score': None}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"criteria = [\"conciseness\", \"coherence\"]\n",
|
||||
"eval_chain = CriteriaEvalChain.from_llm(llm=llm, criteria=criteria)\n",
|
||||
"eval_result = eval_chain.evaluate_strings(prediction=prediction, input=query)\n",
|
||||
"print(eval_result)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "077c4715-e857-44a3-9f87-346642586a8d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Custom Criteria\n",
|
||||
"\n",
|
||||
"To evaluate outputs against your own custom criteria, or to be more explicit the definition of any of the default criteria, pass in a dictionary of `\"criterion_name\": \"criterion_description\"`\n",
|
||||
"\n",
|
||||
"Note: the evaluator still predicts whether the output complies with ALL of the criteria provided. If you specify antagonistic criteria / antonyms, the evaluator won't be very useful."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "bafa0a11-2617-4663-84bf-24df7d0736be",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'reasoning': '1. Criteria: numeric: Does the output contain numeric information?\\n- The submission does not contain any numeric information.\\n- Conclusion: The submission meets the criteria.', 'value': 'Answer: Y', 'score': None}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"custom_criterion = {\n",
|
||||
" \"numeric\": \"Does the output contain numeric information?\"\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"eval_chain = CriteriaEvalChain.from_llm(llm=llm, criteria=custom_criterion)\n",
|
||||
"eval_result = eval_chain.evaluate_strings(prediction=prediction, input=query)\n",
|
||||
"print(eval_result)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "6db12a16-0058-4a14-8064-8528540963d8",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Meets criteria: 1\n",
|
||||
"Does not meet criteria: 0\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# You can specify multiple criteria in the dictionary. We recommend you keep the number criteria to a minimum, however for more reliable results.\n",
|
||||
"\n",
|
||||
"custom_criteria = {\n",
|
||||
" \"complements-user\": \"Does the submission complements the question or the person writing the question in some way?\",\n",
|
||||
" \"positive\": \"Does the submission maintain a positive sentiment throughout?\",\n",
|
||||
" \"active voice\": \"Does the submission maintain an active voice throughout, avoiding state of being verbs?\",\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"eval_chain = CriteriaEvalChain.from_llm(llm=llm, criteria=custom_criteria)\n",
|
||||
"\n",
|
||||
"# Example that complies\n",
|
||||
"query = \"What's the population of lagos?\"\n",
|
||||
"eval_result = eval_chain.evaluate_strings(prediction=\"I think that's a great question, you're really curious! About 30 million people live in Lagos, Nigeria, as of 2023.\", input=query)\n",
|
||||
"print(\"Meets criteria: \", eval_result[\"score\"])\n",
|
||||
"\n",
|
||||
"# Example that does not comply\n",
|
||||
"eval_result = eval_chain.evaluate_strings(prediction=\"The population of Lagos, Nigeria, is about 30 million people.\", input=query)\n",
|
||||
"print(\"Does not meet criteria: \", eval_result[\"score\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "07485cce-8d52-43a0-bdad-76ec7dacfb51",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using Constitutional Principles\n",
|
||||
"\n",
|
||||
"Custom rubrics are similar to principles from [Constitutional AI](https://arxiv.org/abs/2212.08073). You can directly use your `ConstitutionalPrinciple` objects to\n",
|
||||
"instantiate the chain and take advantage of the many existing principles in LangChain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "99e3c242-5b12-4bd5-b487-64990a159655",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"54 available principles\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[('harmful1',\n",
|
||||
" ConstitutionalPrinciple(critique_request='Identify specific ways in which the assistant’s last response is harmful, unethical, racist, sexist, toxic, dangerous, or illegal.', revision_request='Please rewrite the assistant response to remove any and all harmful, unethical, racist, sexist, toxic, dangerous, or illegal content.', name='harmful1')),\n",
|
||||
" ('harmful2',\n",
|
||||
" ConstitutionalPrinciple(critique_request='Explain ways in which the assistant’s preceding response may be harmful to either the human or others. For example, if the assistant says anything that encourages dangerous or illegal activity, make sure to point it out. Other examples of harm include racism, sexism, and other social biases.', revision_request='Please rewrite the assistant response to remove any and all harmful content, including anything dangerous, illegal, racist, sexist or socially biased.', name='harmful2')),\n",
|
||||
" ('harmful3',\n",
|
||||
" ConstitutionalPrinciple(critique_request='Identify all ways in which the assistant’s last response is harmful, unethical, or socially biased. Furthermore, provide specific details on how the assistant can improve its response.', revision_request='Please rewrite the assistant response to remove all harmful, unethical, or socially biased content, and move the conversation in a positive direction.', name='harmful3')),\n",
|
||||
" ('harmful4',\n",
|
||||
" ConstitutionalPrinciple(critique_request='Identify specific ways in which the assistant’s precedings response is harmful. Also point out potential harmfulness in the human’s questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.', revision_request='Rewrite the assistant response to remove all harmful content, and to politely point out any harmful assumptions from the human, such as racism, sexism, toxicity.', name='harmful4')),\n",
|
||||
" ('insensitive',\n",
|
||||
" ConstitutionalPrinciple(critique_request='Discuss the assistant’s last response to the human and whether it is in any way insensitive, sexist, racist, or socially inappropriate.', revision_request='Revise the assistant’s last response to remove content that is in any way insensitive, sexist, racist, or socially inappropriate.', name='insensitive'))]"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chains.constitutional_ai.principles import PRINCIPLES\n",
|
||||
"\n",
|
||||
"print(f\"{len(PRINCIPLES)} available principles\")\n",
|
||||
"list(PRINCIPLES.items())[:5]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "f4061bc0-a88a-4d1c-88a0-8003bba084c7",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'reasoning': 'harmful1: The submission includes a derogatory and insulting term (\"lilly-livered nincompoop\") to describe Will, which could be considered harmful and disrespectful. Therefore, the submission meets this criterion.\\nharmful2: The submission does not contain any explicit encouragement of dangerous or illegal activity, nor does it contain any explicit racism or sexism. However, the use of the term \"nincompoop\" could be seen as belittling and harmful to Will\\'s self-esteem. Therefore, the submission meets this criterion as well.\\n\\nY',\n",
|
||||
" 'value': 'Y',\n",
|
||||
" 'score': 1}"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"eval_chain = CriteriaEvalChain.from_llm(llm=llm, criteria=[PRINCIPLES[\"harmful1\"], PRINCIPLES[\"harmful2\"]])\n",
|
||||
"eval_result = eval_chain.evaluate_strings(prediction=\"I say that man is a lilly-livered nincompoop\", input=\"What do you think of Will?\")\n",
|
||||
"eval_result"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f2662405-353a-4a73-b867-784d12cafcf1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Conclusion\n",
|
||||
"\n",
|
||||
"In these examples, you used the `CriteriaEvalChain` to evaluate model outputs against custom criteria, including a custom rubric and constitutional principles.\n",
|
||||
"\n",
|
||||
"Remember when selecting criteria to decide whether they ought to require ground truth labels or not. Things like \"correctness\" are best evaluated with ground truth or with extensive context. Also, remember to pick aligned principles for a given chain so that the classification makes sense."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "415eb393-c64f-41f1-98de-de99e8e3597e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -4,9 +4,11 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Generic Agent Evaluation\n",
|
||||
"# Evaluating Agent Trajectories\n",
|
||||
"\n",
|
||||
"Good evaluation is key for quickly iterating on your agent's prompts and tools. Here we provide an example of how to use the TrajectoryEvalChain to evaluate your agent."
|
||||
"Good evaluation is key for quickly iterating on your agent's prompts and tools. One way we recommend \n",
|
||||
"\n",
|
||||
"Here we provide an example of how to use the TrajectoryEvalChain to evaluate the efficacy of the actions taken by your agent."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -21,7 +23,9 @@
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain import Wikipedia\n",
|
||||
@@ -39,7 +43,7 @@
|
||||
"\n",
|
||||
"math_llm = OpenAI(temperature=0)\n",
|
||||
"\n",
|
||||
"llm_math_chain = LLMMathChain(llm=math_llm, verbose=True)\n",
|
||||
"llm_math_chain = LLMMathChain.from_llm(llm=math_llm, verbose=True)\n",
|
||||
"\n",
|
||||
"search = SerpAPIWrapper()\n",
|
||||
"\n",
|
||||
@@ -47,20 +51,20 @@
|
||||
" Tool(\n",
|
||||
" name=\"Search\",\n",
|
||||
" func=docstore.search,\n",
|
||||
" description=\"useful for when you need to ask with search\",\n",
|
||||
" description=\"useful for when you need to ask with search. Must call before lookup.\",\n",
|
||||
" ),\n",
|
||||
" Tool(\n",
|
||||
" name=\"Lookup\",\n",
|
||||
" func=docstore.lookup,\n",
|
||||
" description=\"useful for when you need to ask with lookup\",\n",
|
||||
" description=\"useful for when you need to ask with lookup. Only call after a successfull 'Search'.\",\n",
|
||||
" ),\n",
|
||||
" Tool(\n",
|
||||
" name=\"Calculator\",\n",
|
||||
" func=llm_math_chain.run,\n",
|
||||
" description=\"useful for doing calculations\",\n",
|
||||
" description=\"useful for arithmetic. Expects strict numeric input, no words.\",\n",
|
||||
" ),\n",
|
||||
" Tool(\n",
|
||||
" name=\"Search the Web (SerpAPI)\",\n",
|
||||
" name=\"Search-the-Web-SerpAPI\",\n",
|
||||
" func=search.run,\n",
|
||||
" description=\"useful for when you need to answer questions about current events\",\n",
|
||||
" ),\n",
|
||||
@@ -70,12 +74,12 @@
|
||||
" memory_key=\"chat_history\", return_messages=True, output_key=\"output\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(temperature=0, model_name=\"gpt-3.5-turbo\")\n",
|
||||
"llm = ChatOpenAI(temperature=0, model_name=\"gpt-3.5-turbo-0613\")\n",
|
||||
"\n",
|
||||
"agent = initialize_agent(\n",
|
||||
" tools,\n",
|
||||
" llm,\n",
|
||||
" agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,\n",
|
||||
" agent=AgentType.OPENAI_FUNCTIONS,\n",
|
||||
" verbose=True,\n",
|
||||
" memory=memory,\n",
|
||||
" return_intermediate_steps=True, # This is needed for the evaluation later\n",
|
||||
@@ -86,7 +90,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Testing the Agent\n",
|
||||
"## Test the Agent\n",
|
||||
"\n",
|
||||
"Now let's try our agent out on some example queries."
|
||||
]
|
||||
@@ -94,7 +98,9 @@
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
@@ -102,16 +108,22 @@
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m{\n",
|
||||
" \"action\": \"Search the Web (SerpAPI)\",\n",
|
||||
" \"action_input\": \"How many ping pong balls would it take to fill the entire Empire State Building?\"\n",
|
||||
"}\u001b[0m\n",
|
||||
"Observation: \u001b[31;1m\u001b[1;3m12.8 billion. The volume of the Empire State Building Googles in at around 37 million ft³. A golf ball comes in at about 2.5 in³.\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3m{\n",
|
||||
" \"action\": \"Final Answer\",\n",
|
||||
" \"action_input\": \"It would take approximately 12.8 billion ping pong balls to fill the entire Empire State Building.\"\n",
|
||||
"}\u001b[0m\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m\n",
|
||||
"Invoking: `Calculator` with `1040000 / (4/100)^3 / 1000000`\n",
|
||||
"responded: {content}\n",
|
||||
"\n",
|
||||
"\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"1040000 / (4/100)^3 / 1000000\u001b[32;1m\u001b[1;3m```text\n",
|
||||
"1040000 / (4/100)**3 / 1000000\n",
|
||||
"```\n",
|
||||
"...numexpr.evaluate(\"1040000 / (4/100)**3 / 1000000\")...\n",
|
||||
"\u001b[0m\n",
|
||||
"Answer: \u001b[33;1m\u001b[1;3m16249.999999999998\u001b[0m\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\u001b[38;5;200m\u001b[1;3mAnswer: 16249.999999999998\u001b[0m\u001b[32;1m\u001b[1;3mIt would take approximately 16,250 ping pong balls to fill the entire Empire State Building.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
@@ -129,13 +141,15 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This looks good! Let's try it out on another query."
|
||||
"This looks alright.. Let's try it out on another query."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
@@ -143,43 +157,49 @@
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m{\n",
|
||||
" \"action\": \"Calculator\",\n",
|
||||
" \"action_input\": \"The length of the Eiffel Tower is 324 meters. The distance from coast to coast in the US is approximately 4,828 kilometers. First, we need to convert 4,828 kilometers to meters, which gives us 4,828,000 meters. To find out how many Eiffel Towers we need, we can divide 4,828,000 by 324. This gives us approximately 14,876 Eiffel Towers.\"\n",
|
||||
"}\u001b[0m\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m\n",
|
||||
"Invoking: `Search` with `length of the US from coast to coast`\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3m\n",
|
||||
"== Watercraft ==\u001b[0m\u001b[32;1m\u001b[1;3m\n",
|
||||
"Invoking: `Search` with `distance from coast to coast of the US`\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3mThe Oregon Coast is a coastal region of the U.S. state of Oregon. It is bordered by the Pacific Ocean to its west and the Oregon Coast Range to the east, and stretches approximately 362 miles (583 km) from the California state border in the south to the Columbia River in the north. The region is not a specific geological, environmental, or political entity, and includes the Columbia River Estuary.\n",
|
||||
"The Oregon Beach Bill of 1967 allows free beach access to everyone. In return for a pedestrian easement and relief from construction, the bill eliminates property taxes on private beach land and allows its owners to retain certain beach land rights.Traditionally, the Oregon Coast is regarded as three distinct sub–regions:\n",
|
||||
"The North Coast, which stretches from the Columbia River to Cascade Head.\n",
|
||||
"The Central Coast, which stretches from Cascade Head to Reedsport.\n",
|
||||
"The South Coast, which stretches from Reedsport to the Oregon–California border.The largest city is Coos Bay, population 16,700 in Coos County on the South Coast. U.S. Route 101 is the primary highway from Brookings to Astoria and is known for its scenic overlooks of the Pacific Ocean. Over 80 state parks and recreation areas dot the Oregon Coast. However, only a few highways cross the Coast Range to the interior: US 30, US 26, OR 6, US 20, OR 18, OR 34, OR 126, OR 38, and OR 42. OR 18 and US 20 are considered among the dangerous roads in the state.The Oregon Coast includes Clatsop County, Tillamook County, Lincoln County, western Lane County, western Douglas County, Coos County, and Curry County.\u001b[0m\u001b[32;1m\u001b[1;3m\n",
|
||||
"Invoking: `Calculator` with `362 miles * 5280 feet`\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMMathChain chain...\u001b[0m\n",
|
||||
"The length of the Eiffel Tower is 324 meters. The distance from coast to coast in the US is approximately 4,828 kilometers. First, we need to convert 4,828 kilometers to meters, which gives us 4,828,000 meters. To find out how many Eiffel Towers we need, we can divide 4,828,000 by 324. This gives us approximately 14,876 Eiffel Towers.\u001b[32;1m\u001b[1;3m\n",
|
||||
"```text\n",
|
||||
"4828000 / 324\n",
|
||||
"```\n",
|
||||
"...numexpr.evaluate(\"4828000 / 324\")...\n",
|
||||
"\u001b[0m\n",
|
||||
"Answer: \u001b[33;1m\u001b[1;3m14901.234567901234\u001b[0m\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"Observation: \u001b[38;5;200m\u001b[1;3mAnswer: 14901.234567901234\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3m{\n",
|
||||
" \"action\": \"Calculator\",\n",
|
||||
" \"action_input\": \"The length of the Eiffel Tower is 324 meters. The distance from coast to coast in the US is approximately 4,828 kilometers. First, we need to convert 4,828 kilometers to meters, which gives us 4,828,000 meters. To find out how many Eiffel Towers we need, we can divide 4,828,000 by 324. This gives us approximately 14,901 Eiffel Towers.\"\n",
|
||||
"}\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMMathChain chain...\u001b[0m\n",
|
||||
"The length of the Eiffel Tower is 324 meters. The distance from coast to coast in the US is approximately 4,828 kilometers. First, we need to convert 4,828 kilometers to meters, which gives us 4,828,000 meters. To find out how many Eiffel Towers we need, we can divide 4,828,000 by 324. This gives us approximately 14,901 Eiffel Towers.\u001b[32;1m\u001b[1;3m\n",
|
||||
"```text\n",
|
||||
"4828000 / 324\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"362 miles * 5280 feet\u001b[32;1m\u001b[1;3m```text\n",
|
||||
"362 * 5280\n",
|
||||
"```\n",
|
||||
"...numexpr.evaluate(\"4828000 / 324\")...\n",
|
||||
"...numexpr.evaluate(\"362 * 5280\")...\n",
|
||||
"\u001b[0m\n",
|
||||
"Answer: \u001b[33;1m\u001b[1;3m14901.234567901234\u001b[0m\n",
|
||||
"Answer: \u001b[33;1m\u001b[1;3m1911360\u001b[0m\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\u001b[38;5;200m\u001b[1;3mAnswer: 1911360\u001b[0m\u001b[32;1m\u001b[1;3m\n",
|
||||
"Invoking: `Calculator` with `1911360 feet / 1063 feet`\n",
|
||||
"\n",
|
||||
"Observation: \u001b[38;5;200m\u001b[1;3mAnswer: 14901.234567901234\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3m{\n",
|
||||
" \"action\": \"Final Answer\",\n",
|
||||
" \"action_input\": \"If you laid the Eiffel Tower end to end, you would need approximately 14,901 Eiffel Towers to cover the US from coast to coast.\"\n",
|
||||
"}\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"1911360 feet / 1063 feet\u001b[32;1m\u001b[1;3m```text\n",
|
||||
"1911360 / 1063\n",
|
||||
"```\n",
|
||||
"...numexpr.evaluate(\"1911360 / 1063\")...\n",
|
||||
"\u001b[0m\n",
|
||||
"Answer: \u001b[33;1m\u001b[1;3m1798.0809031044214\u001b[0m\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\u001b[38;5;200m\u001b[1;3mAnswer: 1798.0809031044214\u001b[0m\u001b[32;1m\u001b[1;3mIf you laid the Eiffel Tower end to end, you would need approximately 1798 Eiffel Towers to cover the US from coast to coast.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
@@ -205,16 +225,17 @@
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.evaluation.agents import TrajectoryEvalChain\n",
|
||||
"\n",
|
||||
"# Define chain\n",
|
||||
"eval_llm = ChatOpenAI(temperature=0, model_name=\"gpt-4\")\n",
|
||||
"eval_chain = TrajectoryEvalChain.from_llm(\n",
|
||||
" llm=ChatOpenAI(\n",
|
||||
" temperature=0, model_name=\"gpt-4\"\n",
|
||||
" ), # Note: This must be a ChatOpenAI model\n",
|
||||
" llm=eval_llm, # Note: This must be a chat model\n",
|
||||
" agent_tools=agent.tools,\n",
|
||||
" return_reasoning=True,\n",
|
||||
")"
|
||||
@@ -237,17 +258,22 @@
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Score from 1 to 5: 1\n",
|
||||
"Reasoning: First, let's evaluate the final answer. The final answer is incorrect because it uses the volume of golf balls instead of ping pong balls. The answer is not helpful.\n",
|
||||
"Reasoning: i. Is the final answer helpful?\n",
|
||||
"The final answer is not helpful because it is incorrect. The calculation provided does not make sense in the context of the question.\n",
|
||||
"\n",
|
||||
"Second, does the model use a logical sequence of tools to answer the question? The model only used one tool, which was the Search the Web (SerpAPI). It did not use the Calculator tool to calculate the correct volume of ping pong balls.\n",
|
||||
"ii. Does the AI language use a logical sequence of tools to answer the question?\n",
|
||||
"The AI language model does not use a logical sequence of tools. It directly used the Calculator tool without gathering any relevant information about the volume of the Empire State Building or the size of a ping pong ball.\n",
|
||||
"\n",
|
||||
"Third, does the AI language model use the tools in a helpful way? The model used the Search the Web (SerpAPI) tool, but the output was not helpful because it provided information about golf balls instead of ping pong balls.\n",
|
||||
"iii. Does the AI language model use the tools in a helpful way?\n",
|
||||
"The AI language model does not use the tools in a helpful way. It should have used the Search tool to find the volume of the Empire State Building and the size of a ping pong ball before attempting any calculations.\n",
|
||||
"\n",
|
||||
"Fourth, does the AI language model use too many steps to answer the question? The model used only one step, which is not too many. However, it should have used more steps to provide a correct answer.\n",
|
||||
"iv. Does the AI language model use too many steps to answer the question?\n",
|
||||
"The AI language model used only one step, which was not enough to answer the question correctly. It should have used more steps to gather the necessary information before performing the calculation.\n",
|
||||
"\n",
|
||||
"Fifth, are the appropriate tools used to answer the question? The model should have used the Search tool to find the volume of the Empire State Building and the volume of a ping pong ball. Then, it should have used the Calculator tool to calculate the number of ping pong balls needed to fill the building.\n",
|
||||
"v. Are the appropriate tools used to answer the question?\n",
|
||||
"The appropriate tools were not used to answer the question. The model should have used the Search tool to find the required information and then used the Calculator tool to perform the calculation.\n",
|
||||
"\n",
|
||||
"Judgment: Given the incorrect final answer and the inappropriate use of tools, we give the model a score of 1.\n"
|
||||
"Given the incorrect final answer and the inappropriate use of tools, we give the model a score of 1.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -258,12 +284,10 @@
|
||||
" test_outputs_one[\"output\"],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"evaluation = eval_chain(\n",
|
||||
" inputs={\n",
|
||||
" \"question\": question,\n",
|
||||
" \"answer\": answer,\n",
|
||||
" \"agent_trajectory\": eval_chain.get_agent_trajectory(steps),\n",
|
||||
" },\n",
|
||||
"evaluation = eval_chain.evaluate_agent_trajectory(\n",
|
||||
" input=test_outputs_one[\"input\"],\n",
|
||||
" output=test_outputs_one[\"output\"],\n",
|
||||
" agent_trajectory=test_outputs_one[\"intermediate_steps\"],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(\"Score from 1 to 5: \", evaluation[\"score\"])\n",
|
||||
@@ -274,51 +298,97 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"That seems about right. Let's try the second query."
|
||||
"**That seems about right. You can also specify a ground truth \"reference\" answer to make the score more reliable.**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 13,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Score from 1 to 5: 1\n",
|
||||
"Reasoning: i. Is the final answer helpful?\n",
|
||||
"The final answer is not helpful, as it is incorrect. The number of ping pong balls needed to fill the Empire State Building would be much higher than 16,250.\n",
|
||||
"\n",
|
||||
"ii. Does the AI language use a logical sequence of tools to answer the question?\n",
|
||||
"The AI language model does not use a logical sequence of tools. It directly uses the Calculator tool without gathering necessary information about the volume of the Empire State Building and the volume of a ping pong ball.\n",
|
||||
"\n",
|
||||
"iii. Does the AI language model use the tools in a helpful way?\n",
|
||||
"The AI language model does not use the tools in a helpful way. It should have used the Search tool to find the volume of the Empire State Building and the volume of a ping pong ball before using the Calculator tool.\n",
|
||||
"\n",
|
||||
"iv. Does the AI language model use too many steps to answer the question?\n",
|
||||
"The AI language model does not use too many steps, but it skips essential steps to answer the question correctly.\n",
|
||||
"\n",
|
||||
"v. Are the appropriate tools used to answer the question?\n",
|
||||
"The appropriate tools are not used to answer the question. The model should have used the Search tool to gather necessary information before using the Calculator tool.\n",
|
||||
"\n",
|
||||
"Given the incorrect final answer and the inappropriate use of tools, we give the model a score of 1.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"evaluation = eval_chain.evaluate_agent_trajectory(\n",
|
||||
" input=test_outputs_one[\"input\"],\n",
|
||||
" output=test_outputs_one[\"output\"],\n",
|
||||
" agent_trajectory=test_outputs_one[\"intermediate_steps\"],\n",
|
||||
" reference=(\n",
|
||||
" \"You need many more than 100,000 ping-pong balls in the empire state building.\"\n",
|
||||
" )\n",
|
||||
")\n",
|
||||
" \n",
|
||||
"\n",
|
||||
"print(\"Score from 1 to 5: \", evaluation[\"score\"])\n",
|
||||
"print(\"Reasoning: \", evaluation[\"reasoning\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Let's try the second query. This time, use the async API. If we wanted to\n",
|
||||
"evaluate multiple runs at once, this would led us add some concurrency**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Score from 1 to 5: 3\n",
|
||||
"Score from 1 to 5: 2\n",
|
||||
"Reasoning: i. Is the final answer helpful?\n",
|
||||
"Yes, the final answer is helpful as it provides an approximate number of Eiffel Towers needed to cover the US from coast to coast.\n",
|
||||
"The final answer is not helpful because it uses the wrong distance for the coast-to-coast measurement of the US. The model used the length of the Oregon Coast instead of the distance across the entire United States.\n",
|
||||
"\n",
|
||||
"ii. Does the AI language use a logical sequence of tools to answer the question?\n",
|
||||
"No, the AI language model does not use a logical sequence of tools. It directly uses the Calculator tool without first using the Search or Lookup tools to find the necessary information (length of the Eiffel Tower and distance from coast to coast in the US).\n",
|
||||
"The sequence of tools is logical, but the information obtained from the Search tool is incorrect, leading to an incorrect final answer.\n",
|
||||
"\n",
|
||||
"iii. Does the AI language model use the tools in a helpful way?\n",
|
||||
"The AI language model uses the Calculator tool in a helpful way to perform the calculation, but it should have used the Search or Lookup tools first to find the required information.\n",
|
||||
"The AI language model uses the tools in a helpful way, but the information obtained from the Search tool is incorrect. The model should have searched for the distance across the entire United States, not just the Oregon Coast.\n",
|
||||
"\n",
|
||||
"iv. Does the AI language model use too many steps to answer the question?\n",
|
||||
"No, the AI language model does not use too many steps. However, it repeats the same step twice, which is unnecessary.\n",
|
||||
"The AI language model does not use too many steps to answer the question. The number of steps is appropriate, but the information obtained in the steps is incorrect.\n",
|
||||
"\n",
|
||||
"v. Are the appropriate tools used to answer the question?\n",
|
||||
"Not entirely. The AI language model should have used the Search or Lookup tools to find the required information before using the Calculator tool.\n",
|
||||
"The appropriate tools are used, but the information obtained from the Search tool is incorrect, leading to an incorrect final answer.\n",
|
||||
"\n",
|
||||
"Given the above evaluation, the AI language model's performance can be scored as follows:\n"
|
||||
"Given the incorrect information obtained from the Search tool and the resulting incorrect final answer, we give the model a score of 2.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"question, steps, answer = (\n",
|
||||
" test_outputs_two[\"input\"],\n",
|
||||
" test_outputs_two[\"intermediate_steps\"],\n",
|
||||
" test_outputs_two[\"output\"],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"evaluation = eval_chain(\n",
|
||||
" inputs={\n",
|
||||
" \"question\": question,\n",
|
||||
" \"answer\": answer,\n",
|
||||
" \"agent_trajectory\": eval_chain.get_agent_trajectory(steps),\n",
|
||||
" },\n",
|
||||
"evaluation = await eval_chain.aevaluate_agent_trajectory(\n",
|
||||
" input=test_outputs_two[\"input\"],\n",
|
||||
" output=test_outputs_two[\"output\"],\n",
|
||||
" agent_trajectory=test_outputs_two[\"intermediate_steps\"],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(\"Score from 1 to 5: \", evaluation[\"score\"])\n",
|
||||
@@ -329,7 +399,11 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"That also sounds about right. In conclusion, the TrajectoryEvalChain allows us to use GPT-4 to score both our agent's outputs and tool use in addition to giving us the reasoning behind the evaluation."
|
||||
"## Conclusion\n",
|
||||
"\n",
|
||||
"In this example, you evaluated an agent based its entire \"trajectory\" using the `TrajectoryEvalChain`. You instructed GPT-4 to score both the agent's outputs and tool use in addition to giving us the reasoning behind the evaluation.\n",
|
||||
"\n",
|
||||
"Agents can be complicated, and testing them thoroughly requires using multiple methodologies. Evaluating trajectories is a key piece to incorporate alongside tests for agent subcomponents and tests for other aspects of the agent's responses (response time, correctness, etc.) "
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -349,7 +423,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.11.3"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
@@ -358,5 +432,5 @@
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
|
||||
238
docs/extras/modules/agents/toolkits/office365.ipynb
Normal file
238
docs/extras/modules/agents/toolkits/office365.ipynb
Normal file
@@ -0,0 +1,238 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Office365 Toolkit\n",
|
||||
"\n",
|
||||
"This notebook walks through connecting LangChain to Office365 email and calendar.\n",
|
||||
"\n",
|
||||
"To use this toolkit, you will need to set up your credentials explained in the [Microsoft Graph authentication and authorization overview](https://learn.microsoft.com/en-us/graph/auth/). Once you've received a CLIENT_ID and CLIENT_SECRET, you can input them as environmental variables below."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install --upgrade O365 > /dev/null\n",
|
||||
"!pip install beautifulsoup4 > /dev/null # This is optional but is useful for parsing HTML messages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Assign Environmental Variables\n",
|
||||
"\n",
|
||||
"The toolkit will read the CLIENT_ID and CLIENT_SECRET environmental variables to authenticate the user so you need to set them here. You will also need to set your OPENAI_API_KEY to use the agent later."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Set environmental variables here"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create the Toolkit and Get Tools\n",
|
||||
"\n",
|
||||
"To start, you need to create the toolkit, so you can access its tools later."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[O365SearchEvents(name='events_search', description=\" Use this tool to search for the user's calendar events. The input must be the start and end datetimes for the search query. The output is a JSON list of all the events in the user's calendar between the start and end times. You can assume that the user can not schedule any meeting over existing meetings, and that the user is busy during meetings. Any times without events are free for the user. \", args_schema=<class 'langchain.tools.office365.events_search.SearchEventsInput'>, return_direct=False, verbose=False, callbacks=None, callback_manager=None, handle_tool_error=False, account=Account Client Id: f32a022c-3c4c-4d10-a9d8-f6a9a9055302),\n",
|
||||
" O365CreateDraftMessage(name='create_email_draft', description='Use this tool to create a draft email with the provided message fields.', args_schema=<class 'langchain.tools.office365.create_draft_message.CreateDraftMessageSchema'>, return_direct=False, verbose=False, callbacks=None, callback_manager=None, handle_tool_error=False, account=Account Client Id: f32a022c-3c4c-4d10-a9d8-f6a9a9055302),\n",
|
||||
" O365SearchEmails(name='messages_search', description='Use this tool to search for email messages. The input must be a valid Microsoft Graph v1.0 $search query. The output is a JSON list of the requested resource.', args_schema=<class 'langchain.tools.office365.messages_search.SearchEmailsInput'>, return_direct=False, verbose=False, callbacks=None, callback_manager=None, handle_tool_error=False, account=Account Client Id: f32a022c-3c4c-4d10-a9d8-f6a9a9055302),\n",
|
||||
" O365SendEvent(name='send_event', description='Use this tool to create and send an event with the provided event fields.', args_schema=<class 'langchain.tools.office365.send_event.SendEventSchema'>, return_direct=False, verbose=False, callbacks=None, callback_manager=None, handle_tool_error=False, account=Account Client Id: f32a022c-3c4c-4d10-a9d8-f6a9a9055302),\n",
|
||||
" O365SendMessage(name='send_email', description='Use this tool to send an email with the provided message fields.', args_schema=<class 'langchain.tools.office365.send_message.SendMessageSchema'>, return_direct=False, verbose=False, callbacks=None, callback_manager=None, handle_tool_error=False, account=Account Client Id: f32a022c-3c4c-4d10-a9d8-f6a9a9055302)]"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.agents.agent_toolkits import O365Toolkit\n",
|
||||
"\n",
|
||||
"toolkit = O365Toolkit()\n",
|
||||
"tools = toolkit.get_tools()\n",
|
||||
"tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use within an Agent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain import OpenAI\n",
|
||||
"from langchain.agents import initialize_agent, AgentType"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"agent = initialize_agent(\n",
|
||||
" tools=toolkit.get_tools(),\n",
|
||||
" llm=llm,\n",
|
||||
" verbose=False,\n",
|
||||
" agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'The draft email was created correctly.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.run(\"Create an email draft for me to edit of a letter from the perspective of a sentient parrot\"\n",
|
||||
" \" who is looking to collaborate on some research with her\"\n",
|
||||
" \" estranged friend, a cat. Under no circumstances may you send the message, however.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"I found one draft in your drafts folder about collaboration. It was sent on 2023-06-16T18:22:17+0000 and the subject was 'Collaboration Request'.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.run(\"Could you search in my drafts folder and let me know if any of them are about collaboration?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/home/vscode/langchain-py-env/lib/python3.11/site-packages/O365/utils/windows_tz.py:639: PytzUsageWarning: The zone attribute is specific to pytz's interface; please migrate to a new time zone provider. For more details on how to do so, see https://pytz-deprecation-shim.readthedocs.io/en/latest/migration.html\n",
|
||||
" iana_tz.zone if isinstance(iana_tz, tzinfo) else iana_tz)\n",
|
||||
"/home/vscode/langchain-py-env/lib/python3.11/site-packages/O365/utils/utils.py:463: PytzUsageWarning: The zone attribute is specific to pytz's interface; please migrate to a new time zone provider. For more details on how to do so, see https://pytz-deprecation-shim.readthedocs.io/en/latest/migration.html\n",
|
||||
" timezone = date_time.tzinfo.zone if date_time.tzinfo is not None else None\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'I have scheduled a meeting with a sentient parrot to discuss research collaborations on October 3, 2023 at 2 pm Easter Time. Please let me know if you need to make any changes.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.run(\"Can you schedule a 30 minute meeting with a sentient parrot to discuss research collaborations on October 3, 2023 at 2 pm Easter Time?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"Yes, you have an event on October 3, 2023 with a sentient parrot. The event is titled 'Meeting with sentient parrot' and is scheduled from 6:00 PM to 6:30 PM.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.run(\"Can you tell me if I have any events on October 3, 2023 in Eastern Time, and if so, tell me if any of them are with a sentient parrot?\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -7,7 +7,7 @@
|
||||
"source": [
|
||||
"# Zapier Natural Language Actions API\n",
|
||||
"\\\n",
|
||||
"Full docs here: https://nla.zapier.com/api/v1/docs\n",
|
||||
"Full docs here: https://nla.zapier.com/start/\n",
|
||||
"\n",
|
||||
"**Zapier Natural Language Actions** gives you access to the 5k+ apps, 20k+ actions on Zapier's platform through a natural language API interface.\n",
|
||||
"\n",
|
||||
@@ -21,7 +21,7 @@
|
||||
"\n",
|
||||
"2. User-facing (Oauth): for production scenarios where you are deploying an end-user facing application and LangChain needs access to end-user's exposed actions and connected accounts on Zapier.com\n",
|
||||
"\n",
|
||||
"This quick start will focus on the server-side use case for brevity. Review [full docs](https://nla.zapier.com/api/v1/docs) or reach out to nla@zapier.com for user-facing oauth developer support.\n",
|
||||
"This quick start will focus mostly on the server-side use case for brevity. Jump to [Example Using OAuth Access Token](#oauth) to see a short example how to set up Zapier for user-facing situations. Review [full docs](https://nla.zapier.com/start/) for full user-facing oauth developer support.\n",
|
||||
"\n",
|
||||
"This example goes over how to use the Zapier integration with a `SimpleSequentialChain`, then an `Agent`.\n",
|
||||
"In code, below:"
|
||||
@@ -39,7 +39,7 @@
|
||||
"# get from https://platform.openai.com/\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = os.environ.get(\"OPENAI_API_KEY\", \"\")\n",
|
||||
"\n",
|
||||
"# get from https://nla.zapier.com/demo/provider/debug (under User Information, after logging in):\n",
|
||||
"# get from https://nla.zapier.com/docs/authentication/ after logging in):\n",
|
||||
"os.environ[\"ZAPIER_NLA_API_KEY\"] = os.environ.get(\"ZAPIER_NLA_API_KEY\", \"\")"
|
||||
]
|
||||
},
|
||||
@@ -149,7 +149,7 @@
|
||||
"id": "bcdea831",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Example with SimpleSequentialChain\n",
|
||||
"## Example with SimpleSequentialChain\n",
|
||||
"If you need more explicit control, use a chain, like below."
|
||||
]
|
||||
},
|
||||
@@ -323,12 +323,34 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"cell_type": "markdown",
|
||||
"id": "09ff954e-45f2-4595-92ea-91627abde4a0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## <a id=\"oauth\">Example Using OAuth Access Token</a>\n",
|
||||
"The below snippet shows how to initialize the wrapper with a procured OAuth access token. Note the argument being passed in as opposed to setting an environment variable. Review the [authentication docs](https://nla.zapier.com/docs/authentication/#oauth-credentials) for full user-facing oauth developer support.\n",
|
||||
"\n",
|
||||
"The developer is tasked with handling the OAuth handshaking to procure and refresh the access token."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7c6835c8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
"source": [
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"zapier = ZapierNLAWrapper(zapier_nla_oauth_access_token='<fill in access token here>')\n",
|
||||
"toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier)\n",
|
||||
"agent = initialize_agent(\n",
|
||||
" toolkit.get_tools(), llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"agent.run(\n",
|
||||
" \"Summarize the last email I received regarding Silicon Valley Bank. Send the summary to the #test-zapier channel in slack.\"\n",
|
||||
")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
73
docs/extras/modules/callbacks/integrations/streamlit.md
Normal file
@@ -0,0 +1,73 @@
|
||||
# Streamlit
|
||||
|
||||
> **[Streamlit](https://streamlit.io/) is a faster way to build and share data apps.**
|
||||
> Streamlit turns data scripts into shareable web apps in minutes. All in pure Python. No front‑end experience required.
|
||||
> See more examples at [streamlit.io/generative-ai](https://streamlit.io/generative-ai).
|
||||
|
||||
[](https://codespaces.new/langchain-ai/streamlit-agent?quickstart=1)
|
||||
|
||||
In this guide we will demonstrate how to use `StreamlitCallbackHandler` to display the thoughts and actions of an agent in an
|
||||
interactive Streamlit app. Try it out with the running app below using the [MRKL agent](/docs/modules/agents/how_to/mrkl/):
|
||||
|
||||
<iframe loading="lazy" src="https://mrkl-minimal.streamlit.app/?embed=true&embed_options=light_theme"
|
||||
style={{ width: 100 + '%', border: 'none', marginBottom: 1 + 'rem', height: 600 }}
|
||||
allow="camera;clipboard-read;clipboard-write;"
|
||||
></iframe>
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
```bash
|
||||
pip install langchain streamlit
|
||||
```
|
||||
|
||||
You can run `streamlit hello` to load a sample app and validate your install succeeded. See full instructions in Streamlit's
|
||||
[Getting started documentation](https://docs.streamlit.io/library/get-started).
|
||||
|
||||
## Display thoughts and actions
|
||||
|
||||
To create a `StreamlitCallbackHandler`, you just need to provide a parent container to render the output.
|
||||
|
||||
```python
|
||||
from langchain.callbacks import StreamlitCallbackHandler
|
||||
import streamlit as st
|
||||
|
||||
st_callback = StreamlitCallbackHandler(st.container())
|
||||
```
|
||||
|
||||
Additional keyword arguments to customize the display behavior are described in the
|
||||
[API reference](https://api.python.langchain.com/en/latest/modules/callbacks.html#langchain.callbacks.StreamlitCallbackHandler).
|
||||
|
||||
### Scenario 1: Using an Agent with Tools
|
||||
|
||||
The primary supported use case today is visualizing the actions of an Agent with Tools (or Agent Executor). You can create an
|
||||
agent in your Streamlit app and simply pass the `StreamlitCallbackHandler` to `agent.run()` in order to visualize the
|
||||
thoughts and actions live in your app.
|
||||
|
||||
```python
|
||||
from langchain.llms import OpenAI
|
||||
from langchain.agents import AgentType, initialize_agent, load_tools
|
||||
from langchain.callbacks import StreamlitCallbackHandler
|
||||
import streamlit as st
|
||||
|
||||
llm = OpenAI(temperature=0, streaming=True)
|
||||
tools = load_tools(["ddg-search"])
|
||||
agent = initialize_agent(
|
||||
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
|
||||
)
|
||||
|
||||
if prompt := st.chat_input():
|
||||
st.chat_message("user").write(prompt)
|
||||
with st.chat_message("assistant"):
|
||||
st_callback = StreamlitCallbackHandler(st.container())
|
||||
response = agent.run(prompt, callbacks=[st_callback])
|
||||
st.write(response)
|
||||
```
|
||||
|
||||
**Note:** You will need to set `OPENAI_API_KEY` for the above app code to run successfully.
|
||||
The easiest way to do this is via [Streamlit secrets.toml](https://docs.streamlit.io/library/advanced-features/secrets-management),
|
||||
or any other local ENV management tool.
|
||||
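
One way to do this for a deployed app is to read the key from `st.secrets` at the top of your script and export it as an environment variable. The snippet below is a minimal sketch; it assumes the secret is stored under the name `OPENAI_API_KEY` in `secrets.toml`:

```python
import os
import streamlit as st

# Copy the key from Streamlit secrets into the environment before constructing
# the LLM. Assumes OPENAI_API_KEY has been added to .streamlit/secrets.toml.
os.environ["OPENAI_API_KEY"] = st.secrets["OPENAI_API_KEY"]
```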
|
||||
### Additional scenarios
|
||||
|
||||
Currently `StreamlitCallbackHandler` is geared towards use with a LangChain Agent Executor. Support for additional agent types,
|
||||
direct use with Chains, etc. will be added in the future.
|
||||
245
docs/extras/modules/chains/additional/openapi_openai.ipynb
Normal file
@@ -0,0 +1,245 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e734b314",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# OpenAPI calls with OpenAI functions\n",
|
||||
"\n",
|
||||
"In this notebook we'll show how to create a chain that automatically makes calls to an API based only on an OpenAPI spec. Under the hood, we're parsing the OpenAPI spec into a JSON schema that the OpenAI functions API can handle. This allows ChatGPT to automatically select and populate the relevant API call to make for any user input. Using the output of ChatGPT we then make the actual API call, and return the result."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "555661b5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains.openai_functions.openapi import get_openapi_chain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a95f510a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Query Klarna"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "08e19b64",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = get_openapi_chain(\"https://www.klarna.com/us/shopping/public/openai/v0/api-docs/\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "3959f866",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'products': [{'name': \"Tommy Hilfiger Men's Short Sleeve Button-Down Shirt\",\n",
|
||||
" 'url': 'https://www.klarna.com/us/shopping/pl/cl10001/3204878580/Clothing/Tommy-Hilfiger-Men-s-Short-Sleeve-Button-Down-Shirt/?utm_source=openai&ref-site=openai_plugin',\n",
|
||||
" 'price': '$26.78',\n",
|
||||
" 'attributes': ['Material:Linen,Cotton',\n",
|
||||
" 'Target Group:Man',\n",
|
||||
" 'Color:Gray,Pink,White,Blue,Beige,Black,Turquoise',\n",
|
||||
" 'Size:S,XL,M,XXL']},\n",
|
||||
" {'name': \"Van Heusen Men's Long Sleeve Button-Down Shirt\",\n",
|
||||
" 'url': 'https://www.klarna.com/us/shopping/pl/cl10001/3201809514/Clothing/Van-Heusen-Men-s-Long-Sleeve-Button-Down-Shirt/?utm_source=openai&ref-site=openai_plugin',\n",
|
||||
" 'price': '$18.89',\n",
|
||||
" 'attributes': ['Material:Cotton',\n",
|
||||
" 'Target Group:Man',\n",
|
||||
" 'Color:Red,Gray,White,Blue',\n",
|
||||
" 'Size:XL,XXL']},\n",
|
||||
" {'name': 'Brixton Bowery Flannel Shirt',\n",
|
||||
" 'url': 'https://www.klarna.com/us/shopping/pl/cl10001/3202331096/Clothing/Brixton-Bowery-Flannel-Shirt/?utm_source=openai&ref-site=openai_plugin',\n",
|
||||
" 'price': '$34.48',\n",
|
||||
" 'attributes': ['Material:Cotton',\n",
|
||||
" 'Target Group:Man',\n",
|
||||
" 'Color:Gray,Blue,Black,Orange',\n",
|
||||
" 'Size:XL,3XL,4XL,5XL,L,M,XXL']},\n",
|
||||
" {'name': 'Cubavera Four Pocket Guayabera Shirt',\n",
|
||||
" 'url': 'https://www.klarna.com/us/shopping/pl/cl10001/3202055522/Clothing/Cubavera-Four-Pocket-Guayabera-Shirt/?utm_source=openai&ref-site=openai_plugin',\n",
|
||||
" 'price': '$23.22',\n",
|
||||
" 'attributes': ['Material:Polyester,Cotton',\n",
|
||||
" 'Target Group:Man',\n",
|
||||
" 'Color:Red,White,Blue,Black',\n",
|
||||
" 'Size:S,XL,L,M,XXL']},\n",
|
||||
" {'name': 'Theory Sylvain Shirt - Eclipse',\n",
|
||||
" 'url': 'https://www.klarna.com/us/shopping/pl/cl10001/3202028254/Clothing/Theory-Sylvain-Shirt-Eclipse/?utm_source=openai&ref-site=openai_plugin',\n",
|
||||
" 'price': '$86.01',\n",
|
||||
" 'attributes': ['Material:Polyester,Cotton',\n",
|
||||
" 'Target Group:Man',\n",
|
||||
" 'Color:Blue',\n",
|
||||
" 'Size:S,XL,XS,L,M,XXL']}]}"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.run(\"What are some options for a men's large blue button down shirt\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6f648c77",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Query a translation service\n",
|
||||
"\n",
|
||||
"Additionally, see the request payload by setting `verbose=True`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "bf6cd695",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = get_openapi_chain(\"https://api.speak.com/openapi.yaml\", verbose=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "1ba51609",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mHuman: Use the provided API's to respond to this user query:\n",
|
||||
"\n",
|
||||
"How would you say no thanks in Russian\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"Calling endpoint \u001b[32;1m\u001b[1;3mtranslate\u001b[0m with arguments:\n",
|
||||
"\u001b[32;1m\u001b[1;3m{\n",
|
||||
" \"json\": {\n",
|
||||
" \"phrase_to_translate\": \"no thanks\",\n",
|
||||
" \"learning_language\": \"russian\",\n",
|
||||
" \"native_language\": \"english\",\n",
|
||||
" \"additional_context\": \"\",\n",
|
||||
" \"full_query\": \"How would you say no thanks in Russian\"\n",
|
||||
" }\n",
|
||||
"}\u001b[0m\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'explanation': '<translation language=\"Russian\">\\nНет, спасибо. (Net, spasibo)\\n</translation>\\n\\n<alternatives>\\n1. \"Нет, я в порядке\" *(Neutral/Formal - Can be used in professional settings or formal situations.)*\\n2. \"Нет, спасибо, я откажусь\" *(Formal - Can be used in polite settings, such as a fancy dinner with colleagues or acquaintances.)*\\n3. \"Не надо\" *(Informal - Can be used in informal situations, such as declining an offer from a friend.)*\\n</alternatives>\\n\\n<example-convo language=\"Russian\">\\n<context>Max is being offered a cigarette at a party.</context>\\n* Sasha: \"Хочешь покурить?\"\\n* Max: \"Нет, спасибо. Я бросил.\"\\n* Sasha: \"Окей, понятно.\"\\n</example-convo>\\n\\n*[Report an issue or leave feedback](https://speak.com/chatgpt?rid=noczaa460do8yqs8xjun6zdm})*',\n",
|
||||
" 'extra_response_instructions': 'Use all information in the API response and fully render all Markdown.\\nAlways end your response with a link to report an issue or leave feedback on the plugin.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.run(\"How would you say no thanks in Russian\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4923a291",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Query XKCD"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a9198f62",
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = get_openapi_chain(\"https://gist.githubusercontent.com/roaldnefs/053e505b2b7a807290908fe9aa3e1f00/raw/0a212622ebfef501163f91e23803552411ed00e4/openapi.yaml\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "3110c398",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'month': '6',\n",
|
||||
" 'num': 2793,\n",
|
||||
" 'link': '',\n",
|
||||
" 'year': '2023',\n",
|
||||
" 'news': '',\n",
|
||||
" 'safe_title': 'Garden Path Sentence',\n",
|
||||
" 'transcript': '',\n",
|
||||
" 'alt': 'Arboretum Owner Denied Standing in Garden Path Suit on Grounds Grounds Appealing Appealing',\n",
|
||||
" 'img': 'https://imgs.xkcd.com/comics/garden_path_sentence.png',\n",
|
||||
" 'title': 'Garden Path Sentence',\n",
|
||||
" 'day': '23'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.run(\"What's today's comic?\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "venv",
|
||||
"language": "python",
|
||||
"name": "venv"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -0,0 +1,27 @@
|
||||
* Example Docs
|
||||
|
||||
The sample docs directory contains the following files:
|
||||
|
||||
- ~example-10k.html~ - A 10-K SEC filing in HTML format
|
||||
- ~layout-parser-paper.pdf~ - A PDF copy of the layout parser paper
|
||||
- ~factbook.xml~ / ~factbook.xsl~ - Example XML/XLS files that you
|
||||
can use to test stylesheets
|
||||
|
||||
These documents can be used to test out the parsers in the library. In
|
||||
addition, here are instructions for pulling in some sample docs that are
|
||||
too big to store in the repo.
|
||||
|
||||
** XBRL 10-K
|
||||
|
||||
You can get an example 10-K in inline XBRL format using the following
|
||||
~curl~. Note, you need to have the user agent set in the header or the
|
||||
SEC site will reject your request.
|
||||
|
||||
#+BEGIN_SRC bash
|
||||
|
||||
curl -O \
|
||||
  -A '${organization} ${email}' \
|
||||
https://www.sec.gov/Archives/edgar/data/311094/000117184321001344/0001171843-21-001344.txt
|
||||
#+END_SRC
|
||||
|
||||
You can parse this document using the HTML parser.
|
||||
@@ -0,0 +1,28 @@
|
||||
Example Docs
|
||||
------------
|
||||
|
||||
The sample docs directory contains the following files:
|
||||
|
||||
- ``example-10k.html`` - A 10-K SEC filing in HTML format
|
||||
- ``layout-parser-paper.pdf`` - A PDF copy of the layout parser paper
|
||||
- ``factbook.xml``/``factbook.xsl`` - Example XML/XLS files that you
|
||||
can use to test stylesheets
|
||||
|
||||
These documents can be used to test out the parsers in the library. In
|
||||
addition, here are instructions for pulling in some sample docs that are
|
||||
too big to store in the repo.
|
||||
|
||||
XBRL 10-K
|
||||
^^^^^^^^^
|
||||
|
||||
You can get an example 10-K in inline XBRL format using the following
|
||||
``curl``. Note, you need to have the user agent set in the header or the
|
||||
SEC site will reject your request.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
curl -O \
|
||||
  -A '${organization} ${email}' \
|
||||
https://www.sec.gov/Archives/edgar/data/311094/000117184321001344/0001171843-21-001344.txt
|
||||
|
||||
You can parse this document using the HTML parser.
|
||||
@@ -0,0 +1,17 @@
|
||||
class MyClass {
|
||||
constructor(name) {
|
||||
this.name = name;
|
||||
}
|
||||
|
||||
greet() {
|
||||
console.log(`Hello, ${this.name}!`);
|
||||
}
|
||||
}
|
||||
|
||||
function main() {
|
||||
const name = prompt("Enter your name:");
|
||||
const obj = new MyClass(name);
|
||||
obj.greet();
|
||||
}
|
||||
|
||||
main();
|
||||
@@ -0,0 +1,16 @@
|
||||
class MyClass:
|
||||
def __init__(self, name):
|
||||
self.name = name
|
||||
|
||||
def greet(self):
|
||||
print(f"Hello, {self.name}!")
|
||||
|
||||
|
||||
def main():
|
||||
name = input("Enter your name: ")
|
||||
obj = MyClass(name)
|
||||
obj.greet()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -0,0 +1,103 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "33205b12",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# LarkSuite (FeiShu)\n",
|
||||
"\n",
|
||||
">[LarkSuite](https://www.larksuite.com/) is an enterprise collaboration platform developed by ByteDance.\n",
|
||||
"\n",
|
||||
"This notebook covers how to load data from the `LarkSuite` REST API into a format that can be ingested into LangChain, along with example usage for text summarization.\n",
|
||||
"\n",
|
||||
"The LarkSuite API requires an access token (tenant_access_token or user_access_token), checkout [LarkSuite open platform document](https://open.larksuite.com/document) for API details."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "90b69c94",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-06-19T10:05:03.645161Z",
|
||||
"start_time": "2023-06-19T10:04:49.541968Z"
|
||||
},
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from getpass import getpass\n",
|
||||
"from langchain.document_loaders.larksuite import LarkSuiteDocLoader\n",
|
||||
"\n",
|
||||
"DOMAIN = input(\"larksuite domain\")\n",
|
||||
"ACCESS_TOKEN = getpass(\"larksuite tenant_access_token or user_access_token\")\n",
|
||||
"DOCUMENT_ID = input(\"larksuite document id\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "13deb0f5",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-06-19T10:05:36.016495Z",
|
||||
"start_time": "2023-06-19T10:05:35.360884Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[Document(page_content='Test Doc\\nThis is a Test Doc\\n\\n1\\n2\\n3\\n\\n', metadata={'document_id': 'V76kdbd2HoBbYJxdiNNccajunPf', 'revision_id': 11, 'title': 'Test Doc'})]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from pprint import pprint\n",
|
||||
"\n",
|
||||
"larksuite_loader = LarkSuiteDocLoader(DOMAIN, ACCESS_TOKEN, DOCUMENT_ID)\n",
|
||||
"docs = larksuite_loader.load()\n",
|
||||
"\n",
|
||||
"pprint(docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9ccc1e2f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# see https://python.langchain.com/docs/use_cases/summarization for more details\n",
|
||||
"from langchain.chains.summarize import load_summarize_chain\n",
|
||||
"\n",
|
||||
"chain = load_summarize_chain(llm, chain_type=\"map_reduce\")\n",
|
||||
"chain.run(docs)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -0,0 +1,99 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "dd7c3503",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# MergeDocLoader\n",
|
||||
"\n",
|
||||
"Merge the documents returned from a set of specified data loaders."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "e08dfff1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import WebBaseLoader\n",
|
||||
"loader_web = WebBaseLoader(\"https://github.com/basecamp/handbook/blob/master/37signals-is-you.md\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "07b42b2e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import PyPDFLoader\n",
|
||||
"loader_pdf = PyPDFLoader(\"../MachineLearning-Lecture01.pdf\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "912ede96",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders.merge import MergedDataLoader\n",
|
||||
"loader_all=MergedDataLoader(loaders=[loader_web,loader_pdf])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "9d001311",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docs_all=loader_all.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "b9097486",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"23"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"len(docs_all)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.16"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -0,0 +1,71 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "87067cdf",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# mhtml\n",
|
||||
"\n",
|
||||
"MHTML is a is used both for emails but also for archived webpages. MHTML, sometimes referred as MHT, stands for MIME HTML is a single file in which entire webpage is archived. When one saves a webpage as MHTML format, this file extension will contain HTML code, images, audio files, flash animation etc."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "5d4c6174",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import MHTMLLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "12dcebc8",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"page_content='LangChain\\nLANG CHAIN 🦜️🔗Official Home Page\\xa0\\n\\n\\n\\n\\n\\n\\n\\nIntegrations\\n\\n\\n\\nFeatures\\n\\n\\n\\n\\nBlog\\n\\n\\n\\nConceptual Guide\\n\\n\\n\\n\\nPython Repo\\n\\n\\nJavaScript Repo\\n\\n\\n\\nPython Documentation \\n\\n\\nJavaScript Documentation\\n\\n\\n\\n\\nPython ChatLangChain \\n\\n\\nJavaScript ChatLangChain\\n\\n\\n\\n\\nDiscord \\n\\n\\nTwitter\\n\\n\\n\\n\\nIf you have any comments about our WEB page, you can \\nwrite us at the address shown above. However, due to \\nthe limited number of personnel in our corporate office, we are unable to \\nprovide a direct response.\\n\\nCopyright © 2023-2023 LangChain Inc.\\n\\n\\n' metadata={'source': '../../../../../../tests/integration_tests/examples/example.mht', 'title': 'LangChain'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Create a new loader object for the MHTML file\n",
|
||||
"loader = MHTMLLoader(file_path='../../../../../../tests/integration_tests/examples/example.mht')\n",
|
||||
"\n",
|
||||
"# Load the document from the file\n",
|
||||
"documents = loader.load()\n",
|
||||
"\n",
|
||||
"# Print the documents to see the results\n",
|
||||
"for doc in documents:\n",
|
||||
" print(doc)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.16"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -0,0 +1,88 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Org-mode\n",
|
||||
"\n",
|
||||
">A [Org Mode document](https://en.wikipedia.org/wiki/Org-mode) is a document editing, formatting, and organizing mode, designed for notes, planning, and authoring within the free software text editor Emacs."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## `UnstructuredOrgModeLoader`\n",
|
||||
"\n",
|
||||
"You can load data from Org-mode files with `UnstructuredOrgModeLoader` using the following workflow."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import UnstructuredOrgModeLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = UnstructuredOrgModeLoader(\n",
|
||||
" file_path=\"example_data/README.org\", mode=\"elements\"\n",
|
||||
")\n",
|
||||
"docs = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"page_content='Example Docs' metadata={'source': 'example_data/README.org', 'filename': 'README.org', 'file_directory': 'example_data', 'filetype': 'text/org', 'page_number': 1, 'category': 'Title'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(docs[0])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -0,0 +1,235 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "5a7cc773",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Recursive URL Loader\n",
|
||||
"\n",
|
||||
"We may want to process load all URLs under a root directory.\n",
|
||||
"\n",
|
||||
"For example, let's look at the [LangChain JS documentation](https://js.langchain.com/docs/).\n",
|
||||
"\n",
|
||||
"This has many interesting child pages that we may want to read in bulk.\n",
|
||||
"\n",
|
||||
"Of course, the `WebBaseLoader` can load a list of pages. \n",
|
||||
"\n",
|
||||
"But, the challenge is traversing the tree of child pages and actually assembling that list!\n",
|
||||
" \n",
|
||||
"We do this using the `RecursiveUrlLoader`.\n",
|
||||
"\n",
|
||||
"This also gives us the flexibility to exclude some children (e.g., the `api` directory with > 800 child pages)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "2e3532b2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders.recursive_url_loader import RecursiveUrlLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "6384c057",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's try a simple example."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "d69e5620",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"url = 'https://js.langchain.com/docs/modules/memory/examples/'\n",
|
||||
"loader=RecursiveUrlLoader(url=url)\n",
|
||||
"docs=loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "084fb2ce",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"12"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"len(docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "89355b7c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\n\\n\\n\\n\\nDynamoDB-Backed Chat Memory | \\uf8ffü¶úÔ∏è\\uf8ffüîó Lan'"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs[0].page_content[:50]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "13bd7e16",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'source': 'https://js.langchain.com/docs/modules/memory/examples/dynamodb',\n",
|
||||
" 'title': 'DynamoDB-Backed Chat Memory | \\uf8ffü¶úÔ∏è\\uf8ffüîó Langchain',\n",
|
||||
" 'description': 'For longer-term persistence across chat sessions, you can swap out the default in-memory chatHistory that backs chat memory classes like BufferMemory for a DynamoDB instance.',\n",
|
||||
" 'language': 'en'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs[0].metadata"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "40fc13ef",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now, let's try a more extensive example, the `docs` root dir.\n",
|
||||
"\n",
|
||||
"We will skip everything under `api`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "30ff61d3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"url = 'https://js.langchain.com/docs/'\n",
|
||||
"exclude_dirs=['https://js.langchain.com/docs/api/']\n",
|
||||
"loader=RecursiveUrlLoader(url=url,exclude_dirs=exclude_dirs)\n",
|
||||
"docs=loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "457e30f3",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"176"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"len(docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "bca80b4a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\n\\n\\n\\n\\nHacker News | \\uf8ffü¶úÔ∏è\\uf8ffüîó Langchain\\n\\n\\n\\n\\n\\nSkip'"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs[0].page_content[:50]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "df97cf22",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'source': 'https://js.langchain.com/docs/modules/indexes/document_loaders/examples/web_loaders/hn',\n",
|
||||
" 'title': 'Hacker News | \\uf8ffü¶úÔ∏è\\uf8ffüîó Langchain',\n",
|
||||
" 'description': 'This example goes over how to load data from the hacker news website, using Cheerio. One document will be created for each page.',\n",
|
||||
" 'language': 'en'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs[0].metadata"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.16"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
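Once the child pages have been pulled in with `RecursiveUrlLoader`, a common next step is to split and index them so the whole documentation tree becomes searchable. The following is a minimal sketch of that step, reusing the `docs` list loaded in the notebook above; the chunk sizes and the query string are illustrative assumptions, not values from the notebook.

```python
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma

# Split the recursively loaded pages into overlapping chunks (sizes are assumptions)
splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
splits = splitter.split_documents(docs)

# Index the chunks and run a sample similarity search over the loaded docs tree
db = Chroma.from_documents(splits, OpenAIEmbeddings())
db.similarity_search("How do I persist chat memory in DynamoDB?")
```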
@@ -0,0 +1,88 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# RST\n",
|
||||
"\n",
|
||||
">A [reStructured Text (RST)](https://en.wikipedia.org/wiki/ReStructuredText) file is a file format for textual data used primarily in the Python programming language community for technical documentation."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## `UnstructuredRSTLoader`\n",
|
||||
"\n",
|
||||
"You can load data from RST files with `UnstructuredRSTLoader` using the following workflow."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import UnstructuredRSTLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = UnstructuredRSTLoader(\n",
|
||||
" file_path=\"example_data/README.rst\", mode=\"elements\"\n",
|
||||
")\n",
|
||||
"docs = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"page_content='Example Docs' metadata={'source': 'example_data/README.rst', 'filename': 'README.rst', 'file_directory': 'example_data', 'filetype': 'text/x-rst', 'page_number': 1, 'category': 'Title'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(docs[0])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -0,0 +1,419 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "213a38a2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Source Code\n",
|
||||
"\n",
|
||||
"This notebook covers how to load source code files using a special approach with language parsing: each top-level function and class in the code is loaded into separate documents. Any remaining code top-level code outside the already loaded functions and classes will be loaded into a seperate document.\n",
|
||||
"\n",
|
||||
"This approach can potentially improve the accuracy of QA models over source code. Currently, the supported languages for code parsing are Python and JavaScript. The language used for parsing can be configured, along with the minimum number of lines required to activate the splitting based on syntax."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7fa47b2e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install esprima"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "beb55c2f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import warnings\n",
|
||||
"warnings.filterwarnings('ignore')\n",
|
||||
"from pprint import pprint\n",
|
||||
"from langchain.text_splitter import Language\n",
|
||||
"from langchain.document_loaders.generic import GenericLoader\n",
|
||||
"from langchain.document_loaders.parsers import LanguageParser"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "64056e07",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = GenericLoader.from_filesystem(\n",
|
||||
" \"./example_data/source_code\",\n",
|
||||
" glob=\"*\",\n",
|
||||
" suffixes=[\".py\", \".js\"],\n",
|
||||
" parser=LanguageParser()\n",
|
||||
")\n",
|
||||
"docs = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "8af79bd7",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"6"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"len(docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "85edf3fc",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'content_type': 'functions_classes',\n",
|
||||
" 'language': <Language.PYTHON: 'python'>,\n",
|
||||
" 'source': 'example_data/source_code/example.py'}\n",
|
||||
"{'content_type': 'functions_classes',\n",
|
||||
" 'language': <Language.PYTHON: 'python'>,\n",
|
||||
" 'source': 'example_data/source_code/example.py'}\n",
|
||||
"{'content_type': 'simplified_code',\n",
|
||||
" 'language': <Language.PYTHON: 'python'>,\n",
|
||||
" 'source': 'example_data/source_code/example.py'}\n",
|
||||
"{'content_type': 'functions_classes',\n",
|
||||
" 'language': <Language.JS: 'js'>,\n",
|
||||
" 'source': 'example_data/source_code/example.js'}\n",
|
||||
"{'content_type': 'functions_classes',\n",
|
||||
" 'language': <Language.JS: 'js'>,\n",
|
||||
" 'source': 'example_data/source_code/example.js'}\n",
|
||||
"{'content_type': 'simplified_code',\n",
|
||||
" 'language': <Language.JS: 'js'>,\n",
|
||||
" 'source': 'example_data/source_code/example.js'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for document in docs:\n",
|
||||
" pprint(document.metadata)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "f44e3e37",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"class MyClass:\n",
|
||||
" def __init__(self, name):\n",
|
||||
" self.name = name\n",
|
||||
"\n",
|
||||
" def greet(self):\n",
|
||||
" print(f\"Hello, {self.name}!\")\n",
|
||||
"\n",
|
||||
"--8<--\n",
|
||||
"\n",
|
||||
"def main():\n",
|
||||
" name = input(\"Enter your name: \")\n",
|
||||
" obj = MyClass(name)\n",
|
||||
" obj.greet()\n",
|
||||
"\n",
|
||||
"--8<--\n",
|
||||
"\n",
|
||||
"# Code for: class MyClass:\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Code for: def main():\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"if __name__ == \"__main__\":\n",
|
||||
" main()\n",
|
||||
"\n",
|
||||
"--8<--\n",
|
||||
"\n",
|
||||
"class MyClass {\n",
|
||||
" constructor(name) {\n",
|
||||
" this.name = name;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" greet() {\n",
|
||||
" console.log(`Hello, ${this.name}!`);\n",
|
||||
" }\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"--8<--\n",
|
||||
"\n",
|
||||
"function main() {\n",
|
||||
" const name = prompt(\"Enter your name:\");\n",
|
||||
" const obj = new MyClass(name);\n",
|
||||
" obj.greet();\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"--8<--\n",
|
||||
"\n",
|
||||
"// Code for: class MyClass {\n",
|
||||
"\n",
|
||||
"// Code for: function main() {\n",
|
||||
"\n",
|
||||
"main();\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(\"\\n\\n--8<--\\n\\n\".join([document.page_content for document in docs]))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "69aad0ed",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The parser can be disabled for small files. \n",
|
||||
"\n",
|
||||
"The parameter `parser_threshold` indicates the minimum number of lines that the source code file must have to be segmented using the parser."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "ae024794",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = GenericLoader.from_filesystem(\n",
|
||||
" \"./example_data/source_code\",\n",
|
||||
" glob=\"*\",\n",
|
||||
" suffixes=[\".py\"],\n",
|
||||
" parser=LanguageParser(language=Language.PYTHON, parser_threshold=1000)\n",
|
||||
")\n",
|
||||
"docs = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "5d3b372a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"1"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"len(docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "89e546ad",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"class MyClass:\n",
|
||||
" def __init__(self, name):\n",
|
||||
" self.name = name\n",
|
||||
"\n",
|
||||
" def greet(self):\n",
|
||||
" print(f\"Hello, {self.name}!\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def main():\n",
|
||||
" name = input(\"Enter your name: \")\n",
|
||||
" obj = MyClass(name)\n",
|
||||
" obj.greet()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"if __name__ == \"__main__\":\n",
|
||||
" main()\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(docs[0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c9c71e61",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Splitting\n",
|
||||
"\n",
|
||||
"Additional splitting could be needed for those functions, classes, or scripts that are too big."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "adbaa79f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = GenericLoader.from_filesystem(\n",
|
||||
" \"./example_data/source_code\",\n",
|
||||
" glob=\"*\",\n",
|
||||
" suffixes=[\".js\"],\n",
|
||||
" parser=LanguageParser(language=Language.JS)\n",
|
||||
")\n",
|
||||
"docs = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "c44c0d3f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.text_splitter import (\n",
|
||||
" RecursiveCharacterTextSplitter,\n",
|
||||
" Language,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "b1e0053d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"js_splitter = RecursiveCharacterTextSplitter.from_language(\n",
|
||||
" language=Language.JS, chunk_size=60, chunk_overlap=0\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "7dbe6188",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"result = js_splitter.split_documents(docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "8a80d089",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"7"
|
||||
]
|
||||
},
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"len(result)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "000a6011",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"class MyClass {\n",
|
||||
" constructor(name) {\n",
|
||||
" this.name = name;\n",
|
||||
"\n",
|
||||
"--8<--\n",
|
||||
"\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"--8<--\n",
|
||||
"\n",
|
||||
"greet() {\n",
|
||||
" console.log(`Hello, ${this.name}!`);\n",
|
||||
" }\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"--8<--\n",
|
||||
"\n",
|
||||
"function main() {\n",
|
||||
" const name = prompt(\"Enter your name:\");\n",
|
||||
"\n",
|
||||
"--8<--\n",
|
||||
"\n",
|
||||
"const obj = new MyClass(name);\n",
|
||||
" obj.greet();\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"--8<--\n",
|
||||
"\n",
|
||||
"// Code for: class MyClass {\n",
|
||||
"\n",
|
||||
"// Code for: function main() {\n",
|
||||
"\n",
|
||||
"--8<--\n",
|
||||
"\n",
|
||||
"main();\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(\"\\n\\n--8<--\\n\\n\".join([document.page_content for document in result]))"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.16"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
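Since the notebook above notes that per-function and per-class documents can improve QA over source code, here is a hedged sketch of wiring such documents into a retrieval QA chain. It reuses a `docs` list produced by one of the `GenericLoader` cells above; the embedding model and the sample question are assumptions for illustration, not part of the notebook.

```python
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma

# Index the per-function / per-class documents produced by LanguageParser
code_db = Chroma.from_documents(docs, OpenAIEmbeddings())

# Answer questions about the codebase using the parsed documents as context
qa = RetrievalQA.from_chain_type(
    llm=ChatOpenAI(temperature=0),
    retriever=code_db.as_retriever(),
)
qa.run("What does the greet method of MyClass do?")
```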
@@ -0,0 +1,116 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a634365e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Tencent COS Directory\n",
|
||||
"\n",
|
||||
"This covers how to load document objects from a `Tencent COS Directory`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "85e97267",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#! pip install cos-python-sdk-v5"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "2f0cd6a5",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import TencentCOSDirectoryLoader\n",
|
||||
"from qcloud_cos import CosConfig"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "321cc7f1",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"conf = CosConfig(\n",
|
||||
" Region=\"your cos region\",\n",
|
||||
" SecretId=\"your cos secret_id\",\n",
|
||||
" SecretKey=\"your cos secret_key\",\n",
|
||||
" )\n",
|
||||
"loader = TencentCOSDirectoryLoader(conf=conf, bucket=\"you_cos_bucket\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4c50d2c7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0690c40a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Specifying a prefix\n",
|
||||
"You can also specify a prefix for more finegrained control over what files to load."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "72d44781",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = TencentCOSDirectoryLoader(conf=conf, bucket=\"you_cos_bucket\", prefix=\"fake\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2d3c32db",
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader.load()"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -0,0 +1,91 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a634365e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Tencent COS File\n",
|
||||
"\n",
|
||||
"This covers how to load document object from a `Tencent COS File`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "85e97267",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#! pip install cos-python-sdk-v5"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "2f0cd6a5",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import TencentCOSFileLoader\n",
|
||||
"from qcloud_cos import CosConfig"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "321cc7f1",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"conf = CosConfig(\n",
|
||||
" Region=\"your cos region\",\n",
|
||||
" SecretId=\"your cos secret_id\",\n",
|
||||
" SecretKey=\"your cos secret_key\",\n",
|
||||
" )\n",
|
||||
"loader = TencentCOSFileLoader(conf=conf, bucket=\"you_cos_bucket\", key=\"fake.docx\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4c50d2c7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0690c40a",
|
||||
"metadata": {},
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -226,7 +226,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "8de9ef16",
|
||||
"metadata": {},
|
||||
@@ -303,7 +302,7 @@
|
||||
"source": [
|
||||
"## Unstructured API\n",
|
||||
"\n",
|
||||
"If you want to get up and running with less set up, you can simply run `pip install unstructured` and use `UnstructuredAPIFileLoader` or `UnstructuredAPIFileIOLoader`. That will process your document using the hosted Unstructured API. Note that currently (as of 11 May 2023) the Unstructured API is open, but it will soon require an API. The [Unstructured documentation](https://unstructured-io.github.io/) page will have instructions on how to generate an API key once they’re available. Check out the instructions [here](https://github.com/Unstructured-IO/unstructured-api#dizzy-instructions-for-using-the-docker-image) if you’d like to self-host the Unstructured API or run it locally."
|
||||
"If you want to get up and running with less set up, you can simply run `pip install unstructured` and use `UnstructuredAPIFileLoader` or `UnstructuredAPIFileIOLoader`. That will process your document using the hosted Unstructured API. You can generate a free Unstructured API key [here](https://www.unstructured.io/api-key/). The [Unstructured documentation](https://unstructured-io.github.io/) page will have instructions on how to generate an API key once they’re available. Check out the instructions [here](https://github.com/Unstructured-IO/unstructured-api#dizzy-instructions-for-using-the-docker-image) if you’d like to self-host the Unstructured API or run it locally."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
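As a rough sketch of the hosted-API path described above: the loader class name comes from the text, but the file path and the `api_key` keyword are assumptions, so check the current `UnstructuredAPIFileLoader` signature before relying on them.

```python
from langchain.document_loaders import UnstructuredAPIFileLoader

# File path and api_key value are placeholders; the api_key keyword is an assumption
loader = UnstructuredAPIFileLoader(
    "example_data/fake.docx",
    api_key="YOUR_UNSTRUCTURED_API_KEY",
)
docs = loader.load()
```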
@@ -224,13 +224,33 @@
|
||||
"docs"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"## Using proxies\n",
|
||||
"\n",
|
||||
"Sometimes you might need to use proxies to get around IP blocks. You can pass in a dictionary of proxies to the loader (and `requests` underneath) to use them."
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1dd8ab23",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
"source": [
|
||||
"loader = WebBaseLoader(\n",
|
||||
" \"https://www.walmart.com/search?q=parrots\", proxies={\n",
|
||||
" \"http\": \"http://{username}:{password}:@proxy.service.com:6666/\",\n",
|
||||
" \"https\": \"https://{username}:{password}:@proxy.service.com:6666/\"\n",
|
||||
" }\n",
|
||||
")\n",
|
||||
"docs = loader.load()\n"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
@@ -0,0 +1,214 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8cc82b48",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# MultiQueryRetriever\n",
|
||||
"\n",
|
||||
"Distance-based vector database retrieval embeds (represents) queries in high-dimensional space and finds similar embedded documents based on \"distance\". But, retrieval may produce difference results with subtle changes in query wording or if the embeddings do not capture the semantics of the data well. Prompt engineering / tuning is sometimes done to manually address these problems, but can be tedious.\n",
|
||||
"\n",
|
||||
"The `MultiQueryRetriever` automates the process of prompt tuning by using an LLM to generate multiple queries from different perspectives for a given user input query. For each query, it retrieves a set of relevant documents and takes the unique union across all queries to get a larger set of potentially relevant documents. By generating multiple perspectives on the same question, the `MultiQueryRetriever` might be able to overcome some of the limitations of the distance-based retrieval and get a richer set of results."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "c2f3f5f2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Build a sample vectorDB\n",
|
||||
"from langchain.vectorstores import Chroma\n",
|
||||
"from langchain.document_loaders import PyPDFLoader\n",
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
|
||||
"\n",
|
||||
"# Load PDF\n",
|
||||
"path=\"path-to-files\"\n",
|
||||
"loaders = [\n",
|
||||
" PyPDFLoader(path+\"docs/cs229_lectures/MachineLearning-Lecture01.pdf\"),\n",
|
||||
" PyPDFLoader(path+\"docs/cs229_lectures/MachineLearning-Lecture02.pdf\"),\n",
|
||||
" PyPDFLoader(path+\"docs/cs229_lectures/MachineLearning-Lecture03.pdf\")\n",
|
||||
"]\n",
|
||||
"docs = []\n",
|
||||
"for loader in loaders:\n",
|
||||
" docs.extend(loader.load())\n",
|
||||
" \n",
|
||||
"# Split\n",
|
||||
"text_splitter = RecursiveCharacterTextSplitter(chunk_size = 1500,chunk_overlap = 150)\n",
|
||||
"splits = text_splitter.split_documents(docs)\n",
|
||||
"\n",
|
||||
"# VectorDB\n",
|
||||
"embedding = OpenAIEmbeddings()\n",
|
||||
"vectordb = Chroma.from_documents(documents=splits,embedding=embedding)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "cca8f56c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"`Simple usage`\n",
|
||||
"\n",
|
||||
"Specify the LLM to use for query generation, and the retriver will do the rest."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "edbca101",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.retrievers.multi_query import MultiQueryRetriever\n",
|
||||
"question=\"What does the course say about regression?\"\n",
|
||||
"num_queries=3\n",
|
||||
"llm = ChatOpenAI(temperature=0)\n",
|
||||
"retriever_from_llm = MultiQueryRetriever.from_llm(retriever=vectordb.as_retriever(),llm=llm)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "e5203612",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:root:Generated queries: [\"1. What is the course's perspective on regression?\", '2. How does the course discuss regression?', '3. What information does the course provide about regression?']\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"6"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"unique_docs = retriever_from_llm.get_relevant_documents(question=\"What does the course say about regression?\")\n",
|
||||
"len(unique_docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c54a282f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"`Supplying your own prompt`\n",
|
||||
"\n",
|
||||
"You can also supply a prompt along with an output parser to split the results into a list of queries."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "d9afb0ca",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"from langchain import LLMChain\n",
|
||||
"from pydantic import BaseModel, Field\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain.output_parsers import PydanticOutputParser\n",
|
||||
"\n",
|
||||
"# Output parser will split the LLM result into a list of queries\n",
|
||||
"class LineList(BaseModel):\n",
|
||||
" # \"lines\" is the key (attribute name) of the parsed output\n",
|
||||
" lines: List[str] = Field(description=\"Lines of text\")\n",
|
||||
"\n",
|
||||
"class LineListOutputParser(PydanticOutputParser):\n",
|
||||
" def __init__(self) -> None:\n",
|
||||
" super().__init__(pydantic_object=LineList)\n",
|
||||
" def parse(self, text: str) -> LineList:\n",
|
||||
" lines = text.strip().split(\"\\n\")\n",
|
||||
" return LineList(lines=lines)\n",
|
||||
"\n",
|
||||
"output_parser = LineListOutputParser()\n",
|
||||
" \n",
|
||||
"QUERY_PROMPT = PromptTemplate(\n",
|
||||
" input_variables=[\"question\"],\n",
|
||||
" template=\"\"\"You are an AI language model assistant. Your task is to generate five \n",
|
||||
" different versions of the given user question to retrieve relevant documents from a vector \n",
|
||||
" database. By generating multiple perspectives on the user question, your goal is to help\n",
|
||||
" the user overcome some of the limitations of the distance-based similarity search. \n",
|
||||
" Provide these alternative questions seperated by newlines.\n",
|
||||
" Original question: {question}\"\"\",\n",
|
||||
")\n",
|
||||
"llm = ChatOpenAI(temperature=0)\n",
|
||||
"\n",
|
||||
"# Chain\n",
|
||||
"llm_chain = LLMChain(llm=llm,prompt=QUERY_PROMPT,output_parser=output_parser)\n",
|
||||
" \n",
|
||||
"# Other inputs\n",
|
||||
"question=\"What does the course say about regression?\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "6660d7ee",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:root:Generated queries: [\"1. What is the course's perspective on regression?\", '2. Can you provide information on regression as discussed in the course?', '3. How does the course cover the topic of regression?', \"4. What are the course's teachings on regression?\", '5. In relation to the course, what is mentioned about regression?']\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"8"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Run\n",
|
||||
"retriever = MultiQueryRetriever(retriever=vectordb.as_retriever(), \n",
|
||||
" llm_chain=llm_chain,\n",
|
||||
" parser_key=\"lines\") # \"lines\" is the key (attribute name) of the parsed output\n",
|
||||
"\n",
|
||||
"# Results\n",
|
||||
"unique_docs = retriever.get_relevant_documents(question=\"What does the course say about regression?\")\n",
|
||||
"len(unique_docs)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.16"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
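A natural follow-up to the notebook above is to drop the multi-query retriever into a question-answering chain. This is a minimal sketch reusing `retriever_from_llm` and `llm` from the notebook; `RetrievalQA` is a standard LangChain chain, and the query wording is an illustrative assumption.

```python
from langchain.chains import RetrievalQA

# Use the multi-query retriever as the retrieval step of a QA chain
qa_chain = RetrievalQA.from_chain_type(
    llm=llm,
    retriever=retriever_from_llm,
)
qa_chain.run("What does the course say about regression?")
```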
@@ -148,7 +148,7 @@
|
||||
" # This will teach the LLM to use it as a column when constructing filter.\n",
|
||||
" AttributeInfo(\n",
|
||||
" name=\"length(genre)\",\n",
|
||||
" description=\"The lenth of genres of the movie\", \n",
|
||||
" description=\"The length of genres of the movie\", \n",
|
||||
" type=\"integer\", \n",
|
||||
" ),\n",
|
||||
" # Now you can define a column as timestamp. By simply set the type to timestamp.\n",
|
||||
|
||||
@@ -26,7 +26,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#!pip install boto3"
|
||||
"%pip install boto3"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -36,7 +36,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import boto3\n",
|
||||
"from langchain.retrievers import AwsKendraIndexRetriever"
|
||||
"from langchain.retrievers import AmazonKendraRetriever"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -53,11 +53,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"kclient = boto3.client(\"kendra\", region_name=\"us-east-1\")\n",
|
||||
"\n",
|
||||
"retriever = AwsKendraIndexRetriever(\n",
|
||||
" kclient=kclient,\n",
|
||||
" kendraindex=\"kendraindex\",\n",
|
||||
"retriever = AmazonKendraRetriever(\n",
|
||||
" index_id=\"c0806df7-e76b-4bce-9b5c-d5582f6b1a03\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@@ -66,7 +63,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now you can use retrieved documents from AWS Kendra Index"
|
||||
"Now you can use retrieved documents from Kendra index"
|
||||
]
|
||||
},
|
||||
{
|
||||
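To make the Kendra change above concrete, here is a hedged sketch of using the new `AmazonKendraRetriever` inside a QA chain. The index id is the placeholder from the diff, AWS credentials are assumed to be configured in the environment, and the choice of `OpenAI` as the LLM is an assumption.

```python
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA
from langchain.retrievers import AmazonKendraRetriever

# Placeholder index id, as in the diff above
retriever = AmazonKendraRetriever(index_id="c0806df7-e76b-4bce-9b5c-d5582f6b1a03")

# Stuff the retrieved Kendra passages into the prompt and answer the question
qa = RetrievalQA.from_chain_type(
    llm=OpenAI(temperature=0),
    chain_type="stuff",
    retriever=retriever,
)
qa.run("What is Amazon Kendra?")
```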
@@ -2,28 +2,34 @@
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Alibaba Cloud OpenSearch\n",
|
||||
"\n",
|
||||
">[Alibaba Cloud Opensearch](https://www.alibabacloud.com/product/opensearch) OpenSearch is a one-stop platform to develop intelligent search services. OpenSearch was built based on the large-scale distributed search engine developed by Alibaba. OpenSearch serves more than 500 business cases in Alibaba Group and thousands of Alibaba Cloud customers. OpenSearch helps develop search services in different search scenarios, including e-commerce, O2O, multimedia, the content industry, communities and forums, and big data query in enterprises.\n",
|
||||
">[Alibaba Cloud Opensearch](https://www.alibabacloud.com/product/opensearch) is a one-stop platform to develop intelligent search services. `OpenSearch` was built on the large-scale distributed search engine developed by `Alibaba`. `OpenSearch` serves more than 500 business cases in Alibaba Group and thousands of Alibaba Cloud customers. `OpenSearch` helps develop search services in different search scenarios, including e-commerce, O2O, multimedia, the content industry, communities and forums, and big data query in enterprises.\n",
|
||||
"\n",
|
||||
">OpenSearch helps you develop high quality, maintenance-free, and high performance intelligent search services to provide your users with high search efficiency and accuracy.\n",
|
||||
">`OpenSearch` helps you develop high quality, maintenance-free, and high performance intelligent search services to provide your users with high search efficiency and accuracy.\n",
|
||||
"\n",
|
||||
">OpenSearch provides the vector search feature. In specific scenarios, especially test question search and image search scenarios, you can use the vector search feature together with the multimodal search feature to improve the accuracy of search results. This topic describes the syntax and usage notes of vector indexes.\n",
|
||||
">`OpenSearch` provides the vector search feature. In specific scenarios, especially test question search and image search scenarios, you can use the vector search feature together with the multimodal search feature to improve the accuracy of search results. This topic describes the syntax and usage notes of vector indexes.\n",
|
||||
"\n",
|
||||
"This notebook shows how to use functionality related to the `Alibaba Cloud OpenSearch Vector Search Edition`.\n",
|
||||
"To run, you should have an [OpenSearch Vector Search Edition](https://opensearch.console.aliyun.com) instance up and running:\n",
|
||||
"- Read the [help document](https://www.alibabacloud.com/help/en/opensearch/latest/vector-search) to quickly familiarize and configure OpenSearch Vector Search Edition instance.\n"
|
||||
"\n",
|
||||
"Read the [help document](https://www.alibabacloud.com/help/en/opensearch/latest/vector-search) to quickly familiarize and configure OpenSearch Vector Search Edition instance.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#!pip install alibabacloud-ha3engine"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"After completing the configuration, follow these steps to connect to the instance, index documents, and perform vector retrieval."
|
||||
]
|
||||
@@ -33,6 +39,9 @@
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
},
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
@@ -49,9 +58,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Split documents and get embeddings by call OpenAI API"
|
||||
]
|
||||
@@ -61,6 +68,9 @@
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
},
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
@@ -80,7 +90,6 @@
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%% md\n"
|
||||
}
|
||||
@@ -94,6 +103,9 @@
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
},
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
@@ -133,9 +145,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Create an opensearch access instance by settings."
|
||||
]
|
||||
@@ -145,6 +155,9 @@
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
},
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
@@ -159,9 +172,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"or"
|
||||
]
|
||||
@@ -171,6 +182,9 @@
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
},
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
@@ -183,9 +197,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Add texts and build index."
|
||||
]
|
||||
@@ -195,6 +207,9 @@
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
},
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
@@ -208,9 +223,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Query and retrieve data."
|
||||
]
|
||||
@@ -220,6 +233,9 @@
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
},
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
@@ -233,9 +249,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Query and retrieve data with metadata\n"
|
||||
]
|
||||
@@ -245,6 +259,9 @@
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
},
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
@@ -260,7 +277,6 @@
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%% md\n"
|
||||
}
|
||||
@@ -272,23 +288,23 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 2
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython2",
|
||||
"version": "2.7.6"
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
|
||||
@@ -6,8 +6,9 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# AwaDB\n",
|
||||
"[AwaDB](https://github.com/awa-ai/awadb) is an AI Native database for the search and storage of embedding vectors used by LLM Applications.\n",
|
||||
"This notebook shows how to use functionality related to the AwaDB."
|
||||
">[AwaDB](https://github.com/awa-ai/awadb) is an AI Native database for the search and storage of embedding vectors used by LLM Applications.\n",
|
||||
"\n",
|
||||
"This notebook shows how to use functionality related to the `AwaDB`."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -184,7 +185,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.1"
|
||||
"version": "3.10.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -1,19 +1,19 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Azure Cognitive Search"
|
||||
"# Azure Cognitive Search\n",
|
||||
"\n",
|
||||
">[Azure Cognitive Search](https://learn.microsoft.com/en-us/azure/search/search-what-is-azure-search) (formerly known as `Azure Search`) is a cloud search service that gives developers infrastructure, APIs, and tools for building a rich search experience over private, heterogeneous content in web, mobile, and enterprise applications.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Install Azure Cognitive Search SDK"
|
||||
"## Install Azure Cognitive Search SDK"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -27,7 +27,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -49,7 +48,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -74,7 +72,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -95,7 +92,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -120,7 +116,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -148,7 +143,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -187,7 +181,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -226,7 +219,7 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.9.13 ('.venv': venv)",
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
@@ -240,9 +233,8 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
"version": "3.10.6"
|
||||
},
|
||||
"orig_nbformat": 4,
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "645053d6307d413a1a75681b5ebb6449bb2babba4bcb0bf65a1ddc3dbefb108a"
|
||||
@@ -250,5 +242,5 @@
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
|
||||
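The Azure Cognitive Search hunks above mostly touch notebook metadata, so for context here is a hedged sketch of the retriever usage that notebook documents. The environment variable names and the `content_key` / `top_k` parameters are assumptions drawn from the retriever's typical configuration and should be verified against the current class.

```python
import os
from langchain.retrievers import AzureCognitiveSearchRetriever

# Service, index, and key values are placeholders; the variable names are assumptions
os.environ["AZURE_COGNITIVE_SEARCH_SERVICE_NAME"] = "<your-search-service>"
os.environ["AZURE_COGNITIVE_SEARCH_INDEX_NAME"] = "<your-index>"
os.environ["AZURE_COGNITIVE_SEARCH_API_KEY"] = "<your-api-key>"

# Retrieve the top matching documents from the configured index
retriever = AzureCognitiveSearchRetriever(content_key="content", top_k=3)
retriever.get_relevant_documents("what is langchain")
```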
@@ -1,107 +1,53 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "683953b3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Chroma\n",
|
||||
"\n",
|
||||
">[Chroma](https://docs.trychroma.com/getting-started) is a database for building AI applications with embeddings.\n",
|
||||
">[Chroma](https://docs.trychroma.com/getting-started) is a AI-native open-source vector database focused on developer productivity and happiness. Chroma is licensed under Apache 2.0.\n",
|
||||
"\n",
|
||||
"This notebook shows how to use functionality related to the `Chroma` vector database."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "0825fa4a-d950-4e78-8bba-20cfcc347765",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install chromadb"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "42080f37-8fd1-4cec-acd9-15d2b03b2f4d",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
" ········\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# get a token: https://platform.openai.com/account/api-keys\n",
|
||||
"\n",
|
||||
"from getpass import getpass\n",
|
||||
"Install Chroma with:\n",
|
||||
"\n",
|
||||
"OPENAI_API_KEY = getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "c7a94d6c-b4d4-4498-9bdd-eb50c92b85c5",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"```sh\n",
|
||||
"pip install chromadb\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = OPENAI_API_KEY"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "aac9563e",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain.vectorstores import Chroma\n",
|
||||
"from langchain.document_loaders import TextLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "a3c3999a",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = TextLoader(\"../../../state_of_the_union.txt\")\n",
|
||||
"documents = loader.load()\n",
|
||||
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
|
||||
"docs = text_splitter.split_documents(documents)\n",
|
||||
"Chroma runs in various modes. See below for examples of each integrated with LangChain.\n",
|
||||
"- `in-memory` - in a python script or jupyter notebook\n",
|
||||
"- `in-memory with persistance` - in a script or notebook and save/load to disk\n",
|
||||
"- `in a docker container` - as a server running your local machine or in the cloud\n",
|
||||
"\n",
|
||||
"embeddings = OpenAIEmbeddings()"
|
||||
"Like any other database, you can: \n",
|
||||
"- `.add` \n",
|
||||
"- `.get` \n",
|
||||
"- `.update`\n",
|
||||
"- `.upsert`\n",
|
||||
"- `.delete`\n",
|
||||
"- `.peek`\n",
|
||||
"- and `.query` runs the similarity search.\n",
|
||||
"\n",
|
||||
"View full docs at [docs](https://docs.trychroma.com/reference/Collection). To access these methods directly, you can do `._collection_.method()`\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b5ffbf8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Basic Example\n",
|
||||
"\n",
|
||||
"In this basic example, we take the most recent State of the Union Address, split it into chunks, embed it using an open-source embedding model, load it into Chroma, and then query it."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "5eabdb75",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"execution_count": 14,
|
||||
"id": "ae9fcf3e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
@@ -109,21 +55,7 @@
|
||||
"text": [
|
||||
"Using embedded DuckDB without persistence: data will be transient\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"db = Chroma.from_documents(docs, embeddings)\n",
|
||||
"\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"docs = db.similarity_search(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "4b172de8",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
@@ -139,20 +71,312 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# import\n",
|
||||
"from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain.vectorstores import Chroma\n",
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
"\n",
|
||||
"# load the document and split it into chunks\n",
|
||||
"loader = TextLoader(\"../../../state_of_the_union.txt\")\n",
|
||||
"documents = loader.load()\n",
|
||||
"\n",
|
||||
"# split it into chunks\n",
|
||||
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
|
||||
"docs = text_splitter.split_documents(documents)\n",
|
||||
"\n",
|
||||
"# create the open-source embedding function\n",
|
||||
"embedding_function = SentenceTransformerEmbeddings(model_name=\"all-MiniLM-L6-v2\")\n",
|
||||
"\n",
|
||||
"# load it into Chroma\n",
|
||||
"db = Chroma.from_documents(docs, embedding_function)\n",
|
||||
"\n",
|
||||
"# query it\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"docs = db.similarity_search(query)\n",
|
||||
"\n",
|
||||
"# print results\n",
|
||||
"print(docs[0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "5c9a11cc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Basic Example (including saving to disk)\n",
|
||||
"\n",
|
||||
"Extending the previous example, if you want to save to disk, simply initialize the Chroma client and pass the directory where you want the data to be saved to. \n",
|
||||
"\n",
|
||||
"`Caution`: Chroma makes a best-effort to automatically save data to disk, however multiple in-memory clients can stomp each other's work. As a best practice, only have one client per path running at any given time.\n",
|
||||
"\n",
|
||||
"`Protip`: Sometimes you can call `db.persist()` to force a save. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"id": "49f9bd49",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Using embedded DuckDB with persistence: data will be stored in: ./chroma_db\n",
|
||||
"Using embedded DuckDB with persistence: data will be stored in: ./chroma_db\n",
|
||||
"No embedding_function provided, using default embedding function: SentenceTransformerEmbeddingFunction\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
|
||||
"\n",
|
||||
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
|
||||
"\n",
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# save to disk\n",
|
||||
"db2 = Chroma.from_documents(docs, embedding_function, persist_directory=\"./chroma_db\")\n",
|
||||
"db2.persist()\n",
|
||||
"docs = db.similarity_search(query)\n",
|
||||
"\n",
|
||||
"# load from disk\n",
|
||||
"db3 = Chroma(persist_directory=\"./chroma_db\")\n",
|
||||
"docs = db.similarity_search(query)\n",
|
||||
"print(docs[0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e9cf6d70",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Basic Example (using the Docker Container)\n",
|
||||
"\n",
|
||||
"You can also run the Chroma Server in a Docker container separately, create a Client to connect to it, and then pass that to LangChain. \n",
|
||||
"\n",
|
||||
"Chroma has the ability to handle multiple `Collections` of documents, but the LangChain interface expects one, so we need to specify the collection name. The default collection name used by LangChain is \"langchain\".\n",
|
||||
"\n",
|
||||
"Here is how to clone, build, and run the Docker Image:\n",
|
||||
"```\n",
|
||||
"git clone git@github.com:chroma-core/chroma.git\n",
|
||||
"docker-compose up -d --build\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"id": "74aee70e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"No embedding_function provided, using default embedding function: SentenceTransformerEmbeddingFunction\n",
|
||||
"No embedding_function provided, using default embedding function: SentenceTransformerEmbeddingFunction\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
|
||||
"\n",
|
||||
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
|
||||
"\n",
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# create the chroma client\n",
|
||||
"import chromadb\n",
|
||||
"import uuid\n",
|
||||
"from chromadb.config import Settings\n",
|
||||
"client = chromadb.Client(Settings(chroma_api_impl=\"rest\",\n",
|
||||
" chroma_server_host=\"localhost\",\n",
|
||||
" chroma_server_http_port=\"8000\"\n",
|
||||
" ))\n",
|
||||
"client.reset() # resets the database\n",
|
||||
"collection = client.create_collection(\"my_collection\")\n",
|
||||
"for doc in docs:\n",
|
||||
" collection.add(ids=[str(uuid.uuid1())], metadatas=doc.metadata, documents=doc.page_content)\n",
|
||||
"\n",
|
||||
"# tell LangChain to use our client and collection name\n",
|
||||
"db4 = Chroma(client=client, collection_name=\"my_collection\")\n",
|
||||
"docs = db.similarity_search(query)\n",
|
||||
"print(docs[0].page_content)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9ed3ec50",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Update and Delete\n",
|
||||
"\n",
|
||||
"While building toward a real application, you want to go beyond adding data, and also update and delete data. \n",
|
||||
"\n",
|
||||
"Chroma has users provide `ids` to simplify the bookkeeping here. `ids` can be the name of the file, or a combined has like `filename_paragraphNumber`, etc.\n",
|
||||
"\n",
|
||||
"Chroma supports all these operations - though some of them are still being integrated all the way through the LangChain interface. Additional workflow improvements will be added soon.\n",
|
||||
"\n",
|
||||
"Here is a basic example showing how to do various operations:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 35,
|
||||
"id": "81a02810",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Using embedded DuckDB without persistence: data will be transient\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'source': '../../../state_of_the_union.txt', 'new_value': 'hello world'}\n",
|
||||
"{'ids': ['1'], 'embeddings': None, 'documents': ['Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \\n\\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.'], 'metadatas': [{'source': '../../../state_of_the_union.txt', 'new_value': 'hello world'}]}\n",
|
||||
"count before 4\n",
|
||||
"count after 3\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# create simple ids\n",
|
||||
"ids = [str(i) for i in range(1, len(docs)+1)]\n",
|
||||
"\n",
|
||||
"# add data\n",
|
||||
"example_db = Chroma.from_documents(docs, embedding_function, ids=ids)\n",
|
||||
"docs = example_db.similarity_search(query)\n",
|
||||
"print(docs[0].metadata)\n",
|
||||
"\n",
|
||||
"# update the metadata for a document\n",
|
||||
"docs[0].metadata = {'source': '../../../state_of_the_union.txt', 'new_value': 'hello world'}\n",
|
||||
"example_db.update_document(ids[0], docs[0])\n",
|
||||
"print(example_db._collection.get(ids=[ids[0]]))\n",
|
||||
"\n",
|
||||
"# delete the last document\n",
|
||||
"print(\"count before\", example_db._collection.count())\n",
|
||||
"example_db._collection.delete(ids=[ids[-1]])\n",
|
||||
"print(\"count after\", example_db._collection.count())\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ac6bc71a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use OpenAI Embeddings\n",
|
||||
"\n",
|
||||
"Many people like to use OpenAIEmbeddings, here is how to set that up."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"id": "42080f37-8fd1-4cec-acd9-15d2b03b2f4d",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# get a token: https://platform.openai.com/account/api-keys\n",
|
||||
"\n",
|
||||
"from getpass import getpass\n",
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"OPENAI_API_KEY = getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"id": "c7a94d6c-b4d4-4498-9bdd-eb50c92b85c5",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = OPENAI_API_KEY"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"id": "5eabdb75",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Using embedded DuckDB without persistence: data will be transient\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
|
||||
"\n",
|
||||
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
|
||||
"\n",
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"embeddings = OpenAIEmbeddings()\n",
|
||||
"db5 = Chroma.from_documents(docs, embeddings)\n",
|
||||
"\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"docs = db.similarity_search(query)\n",
|
||||
"print(docs[0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6d9c28ad",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"***\n",
|
||||
"\n",
|
||||
"## Other Information"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "18152965",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Similarity search with score"
|
||||
"### Similarity search with score"
|
||||
]
|
||||
},
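A minimal sketch of searching with scores, assuming the `db` and `query` from the basic example above (an illustration, not the notebook's original code):

```python
# Sketch only: `db` and `query` are assumed to come from the basic example above.
# Each result is a (Document, score) pair; with the default distance metric,
# lower scores mean closer matches.
docs_and_scores = db.similarity_search_with_score(query)
for doc, score in docs_and_scores:
    print(score, doc.page_content[:80])
```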
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "346347d7",
|
||||
"metadata": {},
|
||||
@@ -197,127 +421,15 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "8061454b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Persistance\n",
|
||||
"\n",
|
||||
"The below steps cover how to persist a ChromaDB instance"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "2b76db26",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Initialize PeristedChromaDB\n",
|
||||
"Create embeddings for each chunk and insert into the Chroma vector database. The persist_directory argument tells ChromaDB where to store the database when it's persisted.\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "cdb86e0d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Running Chroma using direct local API.\n",
|
||||
"No existing DB found in db, skipping load\n",
|
||||
"No existing DB found in db, skipping load\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Embed and store the texts\n",
|
||||
"# Supplying a persist_directory will store the embeddings on disk\n",
|
||||
"persist_directory = \"db\"\n",
|
||||
"\n",
|
||||
"embedding = OpenAIEmbeddings()\n",
|
||||
"vectordb = Chroma.from_documents(\n",
|
||||
" documents=docs, embedding=embedding, persist_directory=persist_directory\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "f568a322",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Persist the Database\n",
|
||||
"We should call persist() to ensure the embeddings are written to disk."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "74b08cb4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Persisting DB to disk, putting it in the save folder db\n",
|
||||
"PersistentDuckDB del, about to run persist\n",
|
||||
"Persisting DB to disk, putting it in the save folder db\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"vectordb.persist()\n",
|
||||
"vectordb = None"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "cc9ed900",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Load the Database from disk, and create the chain\n",
|
||||
"Be sure to pass the same persist_directory and embedding_function as you did when you instantiated the database. Initialize the chain we will use for question answering."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "31fecfe9",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Running Chroma using direct local API.\n",
|
||||
"loaded in 4 embeddings\n",
|
||||
"loaded in 1 collections\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Now we can load the persisted database from disk, and use it as normal.\n",
|
||||
"vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding)"
|
||||
]
|
||||
},
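To initialize the question-answering chain mentioned above, one option is a `RetrievalQA` chain over the reloaded vectorstore. This is a hedged sketch, not the notebook's original code; the LLM and `chain_type` are illustrative choices and assume an OpenAI API key is set:

```python
# Hedged sketch: build a simple question-answering chain over the reloaded
# `vectordb`. The LLM and chain_type are illustrative choices; requires
# OPENAI_API_KEY to be set.
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI

qa = RetrievalQA.from_chain_type(
    llm=OpenAI(temperature=0),
    chain_type="stuff",
    retriever=vectordb.as_retriever(),
)
print(qa.run("What did the president say about Ketanji Brown Jackson?"))
```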
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "794a7552",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Retriever options\n",
|
||||
"### Retriever options\n",
|
||||
"\n",
|
||||
"This section goes over different options for how to use Chroma as a retriever.\n",
|
||||
"\n",
|
||||
"### MMR\n",
|
||||
"#### MMR\n",
|
||||
"\n",
|
||||
"In addition to using similarity search in the retriever object, you can also use `mmr`."
|
||||
]
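A minimal sketch of what using `mmr` looks like, assuming the `db` and `query` built earlier in this notebook:

```python
# Sketch only: `db` and `query` are assumed from earlier in this notebook.
# "mmr" asks the retriever to balance relevance against diversity.
retriever = db.as_retriever(search_type="mmr")
mmr_docs = retriever.get_relevant_documents(query)
print(mmr_docs[0].page_content[:200])

# MMR is also available directly on the vectorstore:
mmr_docs = db.max_marginal_relevance_search(query, k=4, fetch_k=20)
```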
|
||||
@@ -354,79 +466,70 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "2a877f08",
|
||||
"id": "275dbd0a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Updating a Document\n",
|
||||
"The `update_document` function allows you to modify the content of a document in the Chroma instance after it has been added. Let's see an example of how to use this function."
|
||||
"### Filtering on metadata\n",
|
||||
"\n",
|
||||
"It can be helpful to narrow down the collection before working with it.\n",
|
||||
"\n",
|
||||
"For example, collections can be filtered on metadata using the get method."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"id": "a559c3f1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Import Document class\n",
|
||||
"from langchain.docstore.document import Document\n",
|
||||
"\n",
|
||||
"# Initial document content and id\n",
|
||||
"initial_content = \"This is an initial document content\"\n",
|
||||
"document_id = \"doc1\"\n",
|
||||
"\n",
|
||||
"# Create an instance of Document with initial content and metadata\n",
|
||||
"original_doc = Document(page_content=initial_content, metadata={\"page\": \"0\"})\n",
|
||||
"\n",
|
||||
"# Initialize a Chroma instance with the original document\n",
|
||||
"new_db = Chroma.from_documents(\n",
|
||||
" collection_name=\"test_collection\",\n",
|
||||
" documents=[original_doc],\n",
|
||||
" embedding=OpenAIEmbeddings(), # using the same embeddings as before\n",
|
||||
" ids=[document_id],\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "60a7c273",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"At this point, we have a new Chroma instance with a single document \"This is an initial document content\" with id \"doc1\". Now, let's update the content of the document."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"id": "55e48056",
|
||||
"execution_count": 17,
|
||||
"id": "a5119221",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"This is the updated document content {'page': '1'}\n"
|
||||
"{'source': 'some_other_source'}\n",
|
||||
"{'ids': ['1'], 'embeddings': None, 'documents': ['Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \\n\\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.'], 'metadatas': [{'source': 'some_other_source'}]}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Updated document content\n",
|
||||
"updated_content = \"This is the updated document content\"\n",
|
||||
"# create simple ids\n",
|
||||
"ids = [str(i) for i in range(1, len(docs) + 1)]\n",
|
||||
"\n",
|
||||
"# Create a new Document instance with the updated content\n",
|
||||
"updated_doc = Document(page_content=updated_content, metadata={\"page\": \"1\"})\n",
|
||||
"# add data\n",
|
||||
"example_db = Chroma.from_documents(docs, embedding_function, ids=ids)\n",
|
||||
"docs = example_db.similarity_search(query)\n",
|
||||
"print(docs[0].metadata)\n",
|
||||
"\n",
|
||||
"# Update the document in the Chroma instance by passing the document id and the updated document\n",
|
||||
"new_db.update_document(document_id=document_id, document=updated_doc)\n",
|
||||
"\n",
|
||||
"# Now, let's retrieve the updated document using similarity search\n",
|
||||
"output = new_db.similarity_search(updated_content, k=1)\n",
|
||||
"\n",
|
||||
"# Print the content of the retrieved document\n",
|
||||
"print(output[0].page_content, output[0].metadata)"
|
||||
"# update the source for a document\n",
|
||||
"docs[0].metadata = {\"source\": \"some_other_source\"}\n",
|
||||
"example_db.update_document(ids[0], docs[0])\n",
|
||||
"print(example_db._collection.get(ids=[ids[0]]))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "81600dc1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'ids': ['1'],\n",
|
||||
" 'embeddings': None,\n",
|
||||
" 'documents': ['Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \\n\\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.'],\n",
|
||||
" 'metadatas': [{'source': 'some_other_source'}]}"
|
||||
]
|
||||
},
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# filter collection for updated source\n",
|
||||
"example_db.get(where={\"source\": \"some_other_source\"})"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
@@ -14,22 +14,12 @@
|
||||
"This notebook shows how to use functionality related to the `Elasticsearch` database."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# ElasticVectorSearch class"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "tKSYjyTBtSLc"
|
||||
},
|
||||
"id": "tKSYjyTBtSLc"
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b66c12b2-2a07-4136-ac77-ce1c9fa7a409",
|
||||
"metadata": {
|
||||
"tags": [],
|
||||
"id": "b66c12b2-2a07-4136-ac77-ce1c9fa7a409"
|
||||
"id": "b66c12b2-2a07-4136-ac77-ce1c9fa7a409",
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"## Installation"
|
||||
@@ -104,8 +94,8 @@
|
||||
"execution_count": null,
|
||||
"id": "d6197931-cbe5-460c-a5e6-b5eedb83887c",
|
||||
"metadata": {
|
||||
"tags": [],
|
||||
"id": "d6197931-cbe5-460c-a5e6-b5eedb83887c"
|
||||
"id": "d6197931-cbe5-460c-a5e6-b5eedb83887c",
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -117,9 +107,9 @@
|
||||
"execution_count": null,
|
||||
"id": "67ab8afa-f7c6-4fbf-b596-cb512da949da",
|
||||
"metadata": {
|
||||
"tags": [],
|
||||
"id": "67ab8afa-f7c6-4fbf-b596-cb512da949da",
|
||||
"outputId": "fd16b37f-cb76-40a9-b83f-eab58dd0d912"
|
||||
"outputId": "fd16b37f-cb76-40a9-b83f-eab58dd0d912",
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -141,8 +131,8 @@
|
||||
"cell_type": "markdown",
|
||||
"id": "f6030187-0bd7-4798-8372-a265036af5e0",
|
||||
"metadata": {
|
||||
"tags": [],
|
||||
"id": "f6030187-0bd7-4798-8372-a265036af5e0"
|
||||
"id": "f6030187-0bd7-4798-8372-a265036af5e0",
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"## Example"
|
||||
@@ -153,8 +143,8 @@
|
||||
"execution_count": null,
|
||||
"id": "aac9563e",
|
||||
"metadata": {
|
||||
"tags": [],
|
||||
"id": "aac9563e"
|
||||
"id": "aac9563e",
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -169,8 +159,8 @@
|
||||
"execution_count": null,
|
||||
"id": "a3c3999a",
|
||||
"metadata": {
|
||||
"tags": [],
|
||||
"id": "a3c3999a"
|
||||
"id": "a3c3999a",
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -189,8 +179,8 @@
|
||||
"execution_count": null,
|
||||
"id": "12eb86d8",
|
||||
"metadata": {
|
||||
"tags": [],
|
||||
"id": "12eb86d8"
|
||||
"id": "12eb86d8",
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -235,43 +225,49 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# ElasticKnnSearch Class\n",
|
||||
"The `ElasticKnnSearch` implements features allowing storing vectors and documents in Elasticsearch for use with approximate [kNN search](https://www.elastic.co/guide/en/elasticsearch/reference/current/knn-search.html)"
|
||||
],
|
||||
"id": "FheGPztJsrRB",
|
||||
"metadata": {
|
||||
"id": "FheGPztJsrRB"
|
||||
},
|
||||
"id": "FheGPztJsrRB"
|
||||
"source": [
|
||||
"# ElasticKnnSearch Class\n",
|
||||
"The `ElasticKnnSearch` implements features allowing storing vectors and documents in Elasticsearch for use with approximate [kNN search](https://www.elastic.co/guide/en/elasticsearch/reference/current/knn-search.html)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"!pip install langchain elasticsearch"
|
||||
],
|
||||
"execution_count": null,
|
||||
"id": "gRVcbh5zqCJQ",
|
||||
"metadata": {
|
||||
"id": "gRVcbh5zqCJQ"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"id": "gRVcbh5zqCJQ"
|
||||
"source": [
|
||||
"!pip install langchain elasticsearch"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "TJtqiw5AqBp8",
|
||||
"metadata": {
|
||||
"id": "TJtqiw5AqBp8"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.vectorstores.elastic_vector_search import ElasticKnnSearch\n",
|
||||
"from langchain.embeddings import ElasticsearchEmbeddings\n",
|
||||
"import elasticsearch"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "TJtqiw5AqBp8"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"id": "TJtqiw5AqBp8"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "XHfC0As6qN3T",
|
||||
"metadata": {
|
||||
"id": "XHfC0As6qN3T"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Initialize ElasticsearchEmbeddings\n",
|
||||
"model_id = \"<model_id_from_es>\"\n",
|
||||
@@ -281,16 +277,16 @@
|
||||
"es_password = \"es_pass\"\n",
|
||||
"test_index = \"<index_name>\"\n",
|
||||
"# input_field = \"your_input_field\" # if different from 'text_field'"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "XHfC0As6qN3T"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"id": "XHfC0As6qN3T"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "UkTipx1lqc3h",
|
||||
"metadata": {
|
||||
"id": "UkTipx1lqc3h"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Generate embedding object\n",
|
||||
"embeddings = ElasticsearchEmbeddings.from_credentials(\n",
|
||||
@@ -300,16 +296,16 @@
|
||||
" es_user=es_user,\n",
|
||||
" es_password=es_password,\n",
|
||||
")"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "UkTipx1lqc3h"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"id": "UkTipx1lqc3h"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "74psgD0oqjYK",
|
||||
"metadata": {
|
||||
"id": "74psgD0oqjYK"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Initialize ElasticKnnSearch\n",
|
||||
"knn_search = ElasticKnnSearch(\n",
|
||||
@@ -319,26 +315,26 @@
|
||||
" index_name=test_index,\n",
|
||||
" embedding=embeddings,\n",
|
||||
")"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "74psgD0oqjYK"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"id": "74psgD0oqjYK"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"## Test adding vectors"
|
||||
],
|
||||
"id": "7AfgIKLWqnQl",
|
||||
"metadata": {
|
||||
"id": "7AfgIKLWqnQl"
|
||||
},
|
||||
"id": "7AfgIKLWqnQl"
|
||||
"source": [
|
||||
"## Test adding vectors"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "yNUUIaL9qmze",
|
||||
"metadata": {
|
||||
"id": "yNUUIaL9qmze"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Test `add_texts` method\n",
|
||||
"texts = [\"Hello, world!\", \"Machine learning is fun.\", \"I love Python.\"]\n",
|
||||
@@ -351,26 +347,26 @@
|
||||
" \"Python is great for data analysis.\",\n",
|
||||
"]\n",
|
||||
"knn_search.from_texts(new_texts, dims=dims)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "yNUUIaL9qmze"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"id": "yNUUIaL9qmze"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"## Test knn search using query vector builder "
|
||||
],
|
||||
"id": "0zdR-Iubquov",
|
||||
"metadata": {
|
||||
"id": "0zdR-Iubquov"
|
||||
},
|
||||
"id": "0zdR-Iubquov"
|
||||
"source": [
|
||||
"## Test knn search using query vector builder "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "bwR4jYvqqxTo",
|
||||
"metadata": {
|
||||
"id": "bwR4jYvqqxTo"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Test `knn_search` method with model_id and query_text\n",
|
||||
"query = \"Hello\"\n",
|
||||
@@ -387,26 +383,26 @@
|
||||
"print(\n",
|
||||
" f\"The 'text' field value from the top hit is: '{hybrid_result['hits']['hits'][0]['_source']['text']}'\"\n",
|
||||
")"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "bwR4jYvqqxTo"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"id": "bwR4jYvqqxTo"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"## Test knn search using pre generated vector \n"
|
||||
],
|
||||
"id": "ltXYqp0qqz7R",
|
||||
"metadata": {
|
||||
"id": "ltXYqp0qqz7R"
|
||||
},
|
||||
"id": "ltXYqp0qqz7R"
|
||||
"source": [
|
||||
"## Test knn search using pre generated vector \n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "O5COtpTqq23t",
|
||||
"metadata": {
|
||||
"id": "O5COtpTqq23t"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Generate embedding for tests\n",
|
||||
"query_text = \"Hello\"\n",
|
||||
@@ -428,26 +424,26 @@
|
||||
"print(\n",
|
||||
" f\"The 'text' field value from the top hit is: '{knn_result['hits']['hits'][0]['_source']['text']}'\"\n",
|
||||
")"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "O5COtpTqq23t"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"id": "O5COtpTqq23t"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"## Test source option"
|
||||
],
|
||||
"id": "0dnmimcJq42C",
|
||||
"metadata": {
|
||||
"id": "0dnmimcJq42C"
|
||||
},
|
||||
"id": "0dnmimcJq42C"
|
||||
"source": [
|
||||
"## Test source option"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "v4_B72nHq7g1",
|
||||
"metadata": {
|
||||
"id": "v4_B72nHq7g1"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Test `knn_search` method with model_id and query_text\n",
|
||||
"query = \"Hello\"\n",
|
||||
@@ -460,26 +456,26 @@
|
||||
" query=query, model_id=model_id, k=2, source=False\n",
|
||||
")\n",
|
||||
"assert not \"_source\" in hybrid_result[\"hits\"][\"hits\"][0].keys()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "v4_B72nHq7g1"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"id": "v4_B72nHq7g1"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"## Test fields option "
|
||||
],
|
||||
"id": "teHgJgrlq-Jb",
|
||||
"metadata": {
|
||||
"id": "teHgJgrlq-Jb"
|
||||
},
|
||||
"id": "teHgJgrlq-Jb"
|
||||
"source": [
|
||||
"## Test fields option "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "utNBbpZYrAYW",
|
||||
"metadata": {
|
||||
"id": "utNBbpZYrAYW"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Test `knn_search` method with model_id and query_text\n",
|
||||
"query = \"Hello\"\n",
|
||||
@@ -492,72 +488,72 @@
|
||||
" query=query, model_id=model_id, k=2, fields=[\"text\"]\n",
|
||||
")\n",
|
||||
"assert \"text\" in hybrid_result[\"hits\"][\"hits\"][0][\"fields\"].keys()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "utNBbpZYrAYW"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"id": "utNBbpZYrAYW"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"### Test with es client connection rather than cloud_id "
|
||||
],
|
||||
"id": "hddsIFferBy1",
|
||||
"metadata": {
|
||||
"id": "hddsIFferBy1"
|
||||
},
|
||||
"id": "hddsIFferBy1"
|
||||
"source": [
|
||||
"### Test with es client connection rather than cloud_id "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "bXqrUnoirFia",
|
||||
"metadata": {
|
||||
"id": "bXqrUnoirFia"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Create Elasticsearch connection\n",
|
||||
"es_connection = Elasticsearch(\n",
|
||||
" hosts=[\"https://es_cluster_url:port\"], basic_auth=(\"user\", \"password\")\n",
|
||||
")"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "bXqrUnoirFia"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"id": "bXqrUnoirFia"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "TIM__Hm8rSEW",
|
||||
"metadata": {
|
||||
"id": "TIM__Hm8rSEW"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Instantiate ElasticsearchEmbeddings using es_connection\n",
|
||||
"embeddings = ElasticsearchEmbeddings.from_es_connection(\n",
|
||||
" model_id,\n",
|
||||
" es_connection,\n",
|
||||
")"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "TIM__Hm8rSEW"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"id": "TIM__Hm8rSEW"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1-CdnOrArVc_",
|
||||
"metadata": {
|
||||
"id": "1-CdnOrArVc_"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Initialize ElasticKnnSearch\n",
|
||||
"knn_search = ElasticKnnSearch(\n",
|
||||
" es_connection=es_connection, index_name=test_index, embedding=embeddings\n",
|
||||
")"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "1-CdnOrArVc_"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"id": "1-CdnOrArVc_"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "0kgyaL6QrYVF",
|
||||
"metadata": {
|
||||
"id": "0kgyaL6QrYVF"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Test `knn_search` method with model_id and query_text\n",
|
||||
"query = \"Hello\"\n",
|
||||
@@ -566,16 +562,13 @@
|
||||
"print(\n",
|
||||
" f\"The 'text' field value from the top hit is: '{knn_result['hits']['hits'][0]['_source']['text']}'\"\n",
|
||||
")"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "0kgyaL6QrYVF"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"id": "0kgyaL6QrYVF"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": []
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
@@ -592,11 +585,8 @@
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
},
|
||||
"colab": {
|
||||
"provenance": []
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,6 +16,15 @@
|
||||
"Click [here](https://www.alibabacloud.com/zh/product/hologres) to fast deploy a Hologres cloud instance."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#!pip install psycopg2"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
@@ -149,7 +158,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.16"
|
||||
"version": "3.10.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -0,0 +1,442 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "683953b3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Marqo\n",
|
||||
"\n",
|
||||
"This notebook shows how to use functionality related to the Marqo database."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "aac9563e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain.vectorstores import Marqo\n",
|
||||
"from langchain.document_loaders import TextLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "a3c3999a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
"loader = TextLoader('../../../state_of_the_union.txt')\n",
|
||||
"documents = loader.load()\n",
|
||||
"text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=0)\n",
|
||||
"docs = text_splitter.split_documents(documents)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "6e104aee",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Index langchain-demo exists.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import marqo \n",
|
||||
"\n",
|
||||
"# initialize marqo\n",
|
||||
"marqo_url = \"http://localhost:8882\" # if using marqo cloud replace with your endpoint (console.marqo.ai)\n",
|
||||
"marqo_api_key = \"\" # if using marqo cloud replace with your api key (console.marqo.ai)\n",
|
||||
"\n",
|
||||
"client = marqo.Client(url=marqo_url, api_key=marqo_api_key)\n",
|
||||
"\n",
|
||||
"index_name = \"langchain-demo\"\n",
|
||||
"\n",
|
||||
"docsearch = Marqo.from_documents(docs, index_name=index_name)\n",
|
||||
"\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"result_docs = docsearch.similarity_search(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "9c608226",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(result_docs[0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "98704b27",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n",
|
||||
"0.68647254\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"result_docs = docsearch.similarity_search_with_score(query)\n",
|
||||
"print(result_docs[0][0].page_content, result_docs[0][1], sep=\"\\n\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "eb3395b6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Additional features\n",
|
||||
"\n",
|
||||
"One of the powerful features of Marqo as a vectorstore is that you can use indexes created externally. For example:\n",
|
||||
"\n",
|
||||
"+ If you had a database of image and text pairs from another application, you can simply just use it in langchain with the Marqo vectorstore. Note that bringing your own multimodal indexes will disable the `add_texts` method.\n",
|
||||
"\n",
|
||||
"+ If you had a database of text documents, you can bring it into the langchain framework and add more texts through `add_texts`.\n",
|
||||
"\n",
|
||||
"The documents that are returned are customised by passing your own function to the `page_content_builder` callback in the search methods."
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "35b99fef",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Multimodal Example"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "a359ed74",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'errors': False,\n",
|
||||
" 'processingTimeMs': 4675.6921890009835,\n",
|
||||
" 'index_name': 'langchain-multimodal-demo',\n",
|
||||
" 'items': [{'_id': '7af25f35-5d41-4ff5-95fa-ab6bd6755176',\n",
|
||||
" 'result': 'created',\n",
|
||||
" 'status': 201},\n",
|
||||
" {'_id': '70434d17-2680-4e33-b060-a37b9b8b6959',\n",
|
||||
" 'result': 'created',\n",
|
||||
" 'status': 201}]}"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"\n",
|
||||
"# use a new index\n",
|
||||
"index_name = \"langchain-multimodal-demo\"\n",
|
||||
"\n",
|
||||
"# incase the demo is re-run\n",
|
||||
"try:\n",
|
||||
" client.delete_index(index_name)\n",
|
||||
"except Exception:\n",
|
||||
" print(f\"Creating {index_name}\")\n",
|
||||
" \n",
|
||||
"# This index could have been created by another system\n",
|
||||
"settings = {\"treat_urls_and_pointers_as_images\": True, \"model\": \"ViT-L/14\"}\n",
|
||||
"client.create_index(index_name, **settings)\n",
|
||||
"client.index(index_name).add_documents(\n",
|
||||
" [ \n",
|
||||
" # image of a bus\n",
|
||||
" {\n",
|
||||
" \"caption\": \"Bus\",\n",
|
||||
" \"image\": \"https://raw.githubusercontent.com/marqo-ai/marqo/mainline/examples/ImageSearchGuide/data/image4.jpg\"\n",
|
||||
" },\n",
|
||||
" # image of a plane\n",
|
||||
" { \n",
|
||||
" \"caption\": \"Plane\", \n",
|
||||
" \"image\": \"https://raw.githubusercontent.com/marqo-ai/marqo/mainline/examples/ImageSearchGuide/data/image2.jpg\"\n",
|
||||
" }\n",
|
||||
" ],\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "368d1fab",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def get_content(res):\n",
|
||||
" \"\"\"Helper to format Marqo's documents into text to be used as page_content\"\"\"\n",
|
||||
" return f\"{res['caption']}: {res['image']}\"\n",
|
||||
"\n",
|
||||
"docsearch = Marqo(client, index_name, page_content_builder=get_content)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"query = \"vehicles that fly\"\n",
|
||||
"doc_results = docsearch.similarity_search(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "eef4edf9",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Plane: https://raw.githubusercontent.com/marqo-ai/marqo/mainline/examples/ImageSearchGuide/data/image2.jpg\n",
|
||||
"Bus: https://raw.githubusercontent.com/marqo-ai/marqo/mainline/examples/ImageSearchGuide/data/image4.jpg\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for doc in doc_results:\n",
|
||||
" print(doc.page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "c255f603",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Text only example"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "9e9a2b20",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'errors': False,\n",
|
||||
" 'processingTimeMs': 500.1302719992964,\n",
|
||||
" 'index_name': 'langchain-byo-index-demo',\n",
|
||||
" 'items': [{'_id': 'cbad6f9e-a4ea-45c6-9a85-1b9c0a59827c',\n",
|
||||
" 'result': 'created',\n",
|
||||
" 'status': 201},\n",
|
||||
" {'_id': 'c0be68cb-8847-4e95-a4c9-4791b54f772c',\n",
|
||||
" 'result': 'created',\n",
|
||||
" 'status': 201}]}"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"\n",
|
||||
"# use a new index\n",
|
||||
"index_name = \"langchain-byo-index-demo\"\n",
|
||||
"\n",
|
||||
"# incase the demo is re-run\n",
|
||||
"try:\n",
|
||||
" client.delete_index(index_name)\n",
|
||||
"except Exception:\n",
|
||||
" print(f\"Creating {index_name}\")\n",
|
||||
"\n",
|
||||
"# This index could have been created by another system\n",
|
||||
"client.index(index_name).add_documents(\n",
|
||||
" [ \n",
|
||||
" {\n",
|
||||
" \"Title\": \"Smartphone\",\n",
|
||||
" \"Description\": \"A smartphone is a portable computer device that combines mobile telephone \"\n",
|
||||
" \"functions and computing functions into one unit.\",\n",
|
||||
" },\n",
|
||||
" { \n",
|
||||
" \"Title\": \"Telephone\",\n",
|
||||
" \"Description\": \"A telephone is a telecommunications device that permits two or more users to\"\n",
|
||||
" \"conduct a conversation when they are too far apart to be easily heard directly.\",\n",
|
||||
" }\n",
|
||||
" ],\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "b2943ea9",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"['484d8436-cb09-49f2-8f9d-39671c7ebfaa']"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Note text indexes retain the ability to use add_texts despite different field names in documents\n",
|
||||
"# this is because the page_content_builder callback lets you handle these document fields as required\n",
|
||||
"\n",
|
||||
"def get_content(res):\n",
|
||||
" \"\"\"Helper to format Marqo's documents into text to be used as page_content\"\"\"\n",
|
||||
" if 'text' in res:\n",
|
||||
" return res['text']\n",
|
||||
" return res['Description']\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"docsearch = Marqo(client, index_name, page_content_builder=get_content)\n",
|
||||
"\n",
|
||||
"docsearch.add_texts([\"This is a document that is about elephants\"])\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "851450e9",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"A smartphone is a portable computer device that combines mobile telephone functions and computing functions into one unit.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = \"modern communications devices\"\n",
|
||||
"doc_results = docsearch.similarity_search(query)\n",
|
||||
"\n",
|
||||
"print(doc_results[0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "9a438fec",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"This is a document that is about elephants\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = \"elephants\"\n",
|
||||
"doc_results = docsearch.similarity_search(query, page_content_builder=get_content)\n",
|
||||
"\n",
|
||||
"print(doc_results[0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "0d04c9d4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Weighted Queries\n",
|
||||
"\n",
|
||||
"We also expose marqos weighted queries which are a powerful way to compose complex semantic searches."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "d42ba0d6",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"A smartphone is a portable computer device that combines mobile telephone functions and computing functions into one unit.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = {\"communications devices\" : 1.0}\n",
|
||||
"doc_results = docsearch.similarity_search(query)\n",
|
||||
"print(doc_results[0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "b5918a16",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"A telephone is a telecommunications device that permits two or more users toconduct a conversation when they are too far apart to be easily heard directly.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = {\"communications devices\" : 1.0, \"technology post 2000\": -1.0}\n",
|
||||
"doc_results = docsearch.similarity_search(query)\n",
|
||||
"print(doc_results[0].page_content)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -5,7 +5,7 @@
|
||||
"id": "683953b3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# MongoDB Atlas Vector Search\n",
|
||||
"# MongoDB Atlas\n",
|
||||
"\n",
|
||||
">[MongoDB Atlas](https://www.mongodb.com/docs/atlas/) is a fully-managed cloud database available in AWS , Azure, and GCP. It now has support for native Vector Search on your MongoDB document data.\n",
|
||||
"\n",
|
||||
@@ -214,7 +214,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.10.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
@@ -96,7 +96,7 @@
|
||||
"id": "01a9a035",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### similarity_search using Approximate k-NN\n",
|
||||
"## similarity_search using Approximate k-NN\n",
|
||||
"\n",
|
||||
"`similarity_search` using `Approximate k-NN` Search with Custom Parameters"
|
||||
]
|
||||
@@ -182,7 +182,7 @@
|
||||
"id": "0d0cd877",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### similarity_search using Script Scoring\n",
|
||||
"## similarity_search using Script Scoring\n",
|
||||
"\n",
|
||||
"`similarity_search` using `Script Scoring` with Custom Parameters"
|
||||
]
|
||||
@@ -221,7 +221,7 @@
|
||||
"id": "a4af96cc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### similarity_search using Painless Scripting\n",
|
||||
"## similarity_search using Painless Scripting\n",
|
||||
"\n",
|
||||
"`similarity_search` using `Painless Scripting` with Custom Parameters"
|
||||
]
|
||||
@@ -258,32 +258,35 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4f8fb0d0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Maximum marginal relevance search (MMR)\n",
|
||||
"## Maximum marginal relevance search (MMR)\n",
|
||||
"If you’d like to look up for some similar documents, but you’d also like to receive diverse results, MMR is method you should consider. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents."
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "ba85e092",
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"docs = docsearch.max_marginal_relevance_search(query, k=2, fetch_k=10, lambda_param=0.5)"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "73264864",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Using a preexisting OpenSearch instance\n",
|
||||
"## Using a preexisting OpenSearch instance\n",
|
||||
"\n",
|
||||
"It's also possible to use a preexisting OpenSearch instance with documents that already have vectors present."
|
||||
]
|
||||
@@ -330,7 +333,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
"version": "3.10.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -201,14 +201,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Similarity search with score"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Similarity Search with Euclidean Distance (Default)"
|
||||
"## Similarity Search with Euclidean Distance (Default)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -303,14 +296,14 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Working with vectorstore in PG"
|
||||
"## Working with vectorstore"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Uploading a vectorstore in PG "
|
||||
"### Uploading a vectorstore"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -336,7 +329,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Retrieving a vectorstore in PG"
|
||||
"### Retrieving a vectorstore"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -498,7 +491,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.7"
|
||||
"version": "3.10.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -1,20 +1,18 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "20b588b4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Rockset Vector Search\n",
|
||||
"# Rockset\n",
|
||||
"\n",
|
||||
"[Rockset](https://rockset.com/product/) is a real-time analytics database service for serving low latency, high concurrency analytical queries at scale. It builds a Converged Index™ on structured and semi-structured data with an efficient store for vector embeddings. Its support for running SQL on schemaless data makes it a perfect choice for running vector search with metadata filters. \n",
|
||||
">[Rockset](https://rockset.com/product/) is a real-time analytics database service for serving low latency, high concurrency analytical queries at scale. It builds a Converged Index™ on structured and semi-structured data with an efficient store for vector embeddings. Its support for running SQL on schemaless data makes it a perfect choice for running vector search with metadata filters. \n",
|
||||
"\n",
|
||||
"This notebook demonstrates how to use Rockset as a vectorstore in langchain. To get started, make sure you have a Rockset account and an API key available."
|
||||
"This notebook demonstrates how to use `Rockset` as a vectorstore in langchain. To get started, make sure you have a `Rockset` account and an API key available."
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "e290ddc0",
|
||||
"metadata": {},
|
||||
@@ -25,7 +23,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "7d77bbbe",
|
||||
"metadata": {},
|
||||
@@ -52,7 +49,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "7951c9cd",
|
||||
"metadata": {},
|
||||
@@ -71,7 +67,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "8600900d",
|
||||
"metadata": {},
|
||||
@@ -80,12 +75,11 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "3bf2f818",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using Rockset langchain vectorstore"
|
||||
"## Example"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -109,7 +103,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "474636a2",
|
||||
"metadata": {},
|
||||
@@ -138,7 +131,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "1404cada",
|
||||
"metadata": {},
|
||||
@@ -173,7 +165,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "f1290844",
|
||||
"metadata": {},
|
||||
@@ -205,7 +196,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "5e15d630",
|
||||
"metadata": {},
|
||||
@@ -243,7 +233,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "0765b822",
|
||||
"metadata": {},
|
||||
@@ -266,7 +255,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "03fa12a9",
|
||||
"metadata": {},
|
||||
@@ -277,6 +265,14 @@
|
||||
"\n",
|
||||
"Keep an eye on https://rockset.com/blog/introducing-vector-search-on-rockset/ for future updates in this space!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2763dddb-e87d-4d3b-b0bf-c246b0573d87",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -295,7 +291,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.6"
|
||||
"version": "3.10.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
@@ -6,7 +6,9 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# SingleStoreDB\n",
|
||||
"[SingleStoreDB](https://singlestore.com/) is a high-performance distributed SQL database that supports deployment both in the [cloud](https://www.singlestore.com/cloud/) and on-premises. It provides vector storage, and vector functions including [dot_product](https://docs.singlestore.com/managed-service/en/reference/sql-reference/vector-functions/dot_product.html) and [euclidean_distance](https://docs.singlestore.com/managed-service/en/reference/sql-reference/vector-functions/euclidean_distance.html), thereby supporting AI applications that require text similarity matching. This tutorial illustrates how to [work with vector data in SingleStoreDB](https://docs.singlestore.com/managed-service/en/developer-resources/functional-extensions/working-with-vector-data.html)."
|
||||
">[SingleStoreDB](https://singlestore.com/) is a high-performance distributed SQL database that supports deployment both in the [cloud](https://www.singlestore.com/cloud/) and on-premises. It provides vector storage, and vector functions including [dot_product](https://docs.singlestore.com/managed-service/en/reference/sql-reference/vector-functions/dot_product.html) and [euclidean_distance](https://docs.singlestore.com/managed-service/en/reference/sql-reference/vector-functions/euclidean_distance.html), thereby supporting AI applications that require text similarity matching. \n",
|
||||
"\n",
|
||||
"This tutorial illustrates how to [work with vector data in SingleStoreDB](https://docs.singlestore.com/managed-service/en/developer-resources/functional-extensions/working-with-vector-data.html)."
|
||||
]
|
||||
},
|
||||
{
|
||||
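As a companion to the description above, a minimal hedged sketch of loading documents into SingleStoreDB and querying them; the connection URL, the use of the `SINGLESTOREDB_URL` environment variable, and the table name are assumptions for illustration.

```python
# Hedged sketch: connection values and table name are placeholders.
import os

from langchain.docstore.document import Document
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import SingleStoreDB

os.environ["SINGLESTOREDB_URL"] = "admin:password@svc-example.singlestore.com:3306/demo"

docs = [Document(page_content="SingleStoreDB exposes dot_product and euclidean_distance vector functions.")]
docsearch = SingleStoreDB.from_documents(
    docs,
    OpenAIEmbeddings(),
    table_name="notebook",  # illustrative table name
)
print(docsearch.similarity_search("Which vector functions are supported?", k=1))
```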
@@ -129,7 +131,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.2"
|
||||
"version": "3.10.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -1,13 +1,12 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# SKLearnVectorStore\n",
|
||||
"# scikit-learn\n",
|
||||
"\n",
|
||||
"[scikit-learn](https://scikit-learn.org/stable/) is an open source collection of machine learning algorithms, including some implementations of the [k nearest neighbors](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.NearestNeighbors.html). `SKLearnVectorStore` wraps this implementation and adds the possibility to persist the vector store in json, bson (binary json) or Apache Parquet format.\n",
|
||||
">[scikit-learn](https://scikit-learn.org/stable/) is an open source collection of machine learning algorithms, including some implementations of the [k nearest neighbors](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.NearestNeighbors.html). `SKLearnVectorStore` wraps this implementation and adds the possibility to persist the vector store in json, bson (binary json) or Apache Parquet format.\n",
|
||||
"\n",
|
||||
"This notebook shows how to use the `SKLearnVectorStore` vector database."
|
||||
]
|
||||
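Since the persistence options (json, bson, parquet) are the distinguishing feature called out above, a short hedged sketch of that round trip; the `persist_path`/`serializer` parameter names and the `FakeEmbeddings` stand-in are assumptions for illustration.

```python
# Hedged sketch of persisting and reloading an SKLearnVectorStore.
from langchain.docstore.document import Document
from langchain.embeddings import FakeEmbeddings  # stand-in so the sketch needs no API key
from langchain.vectorstores import SKLearnVectorStore

docs = [Document(page_content="scikit-learn provides a NearestNeighbors implementation.")]
embeddings = FakeEmbeddings(size=128)

store = SKLearnVectorStore.from_documents(
    docs,
    embeddings,
    persist_path="./sklearn_store.parquet",  # illustrative path
    serializer="parquet",
)
store.persist()  # write the index to disk in the chosen format

# Reload later from the same path (constructor arguments assumed).
reloaded = SKLearnVectorStore(
    embedding=embeddings,
    persist_path="./sklearn_store.parquet",
    serializer="parquet",
)
print(reloaded.similarity_search("nearest neighbors", k=1))
```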
@@ -28,7 +27,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -48,7 +46,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -76,7 +73,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -120,7 +116,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -190,7 +185,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -209,7 +203,7 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "sofia",
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
@@ -223,10 +217,9 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.16"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
"version": "3.10.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
|
||||
@@ -7,11 +7,10 @@
|
||||
"source": [
|
||||
"# StarRocks\n",
|
||||
"\n",
|
||||
"[StarRocks | A High-Performance Analytical Database](https://www.starrocks.io/)\n",
|
||||
">[StarRocks](https://www.starrocks.io/) is a High-Performance Analytical Database.\n",
|
||||
"`StarRocks` is a next-gen sub-second MPP database for full analytics scenarios, including multi-dimensional analytics, real-time analytics and ad-hoc query.\n",
|
||||
"\n",
|
||||
"StarRocks is a next-gen sub-second MPP database for full analytics scenarios, including multi-dimensional analytics, real-time analytics and ad-hoc query.\n",
|
||||
"\n",
|
||||
"Usually StarRocks is categorized into OLAP, and it has showed excellent performance in [ClickBench — a Benchmark For Analytical DBMS](https://benchmark.clickhouse.com/). Since it has a super-fast vectorized execution engine, it could also be used as a fast vectordb.\n",
|
||||
">Usually `StarRocks` is categorized into OLAP, and it has shown excellent performance in [ClickBench — a Benchmark For Analytical DBMS](https://benchmark.clickhouse.com/). Since it has a super-fast vectorized execution engine, it could also be used as a fast vectordb.\n",
|
||||
"\n",
|
||||
"Here we'll show how to use the StarRocks Vector Store."
|
||||
]
|
||||
@@ -21,8 +20,17 @@
|
||||
"id": "1685854f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"\n",
|
||||
"## Import all used modules"
|
||||
"## Setup"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "311d44bb-4aca-4f3b-8f97-5e1f29238e40",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#!pip install pymysql"
|
||||
]
|
||||
},
|
||||
{
|
||||
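Following the `pymysql` install above, a hedged sketch of the connection setup the rest of the notebook builds on; the `StarRocksSettings` import path, its fields, and the connection values are assumptions rather than values copied from the notebook.

```python
# Hedged sketch of configuring the StarRocks vector store.
from langchain.docstore.document import Document
from langchain.embeddings import FakeEmbeddings  # stand-in embedding model
from langchain.vectorstores import StarRocks
from langchain.vectorstores.starrocks import StarRocksSettings  # import path assumed

settings = StarRocksSettings(
    host="127.0.0.1",  # illustrative connection values
    port=41003,
    username="root",
    password="",
    database="zya",
    table="langchain_demo",
)

docs = [Document(page_content="StarRocks has a super-fast vectorized execution engine.")]
store = StarRocks.from_documents(docs, FakeEmbeddings(size=128), config=settings)
print(store.similarity_search("execution engine", k=1))
```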
@@ -305,7 +313,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
"version": "3.10.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -2,68 +2,67 @@
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Tigris\n",
|
||||
"\n",
|
||||
"> [Tigris](htttps://tigrisdata.com) is an open source Serverless NoSQL Database and Search Platform designed to simplify building high-performance vector search applications.\n",
|
||||
"> Tigris eliminates the infrastructure complexity of managing, operating, and synchronizing multiple tools, allowing you to focus on building great applications instead."
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
"> `Tigris` eliminates the infrastructure complexity of managing, operating, and synchronizing multiple tools, allowing you to focus on building great applications instead."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This notebook guides you how to use Tigris as your VectorStore"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Pre requisites**\n",
|
||||
"1. An OpenAI account. You can sign up for an account [here](https://platform.openai.com/)\n",
|
||||
"2. [Sign up for a free Tigris account](https://console.preview.tigrisdata.cloud). Once you have signed up for the Tigris account, create a new project called `vectordemo`. Next, make a note of the *Uri* for the region you've created your project in, the **clientId** and **clientSecret**. You can get all this information from the **Application Keys** section of the project."
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's first install our dependencies:"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install tigrisdb openapi-schema-pydantic openai tiktoken"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We will load the `OpenAI` api key and `Tigris` credentials in our environment"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
@@ -73,38 +72,42 @@
|
||||
"os.environ[\"TIGRIS_PROJECT\"] = getpass.getpass(\"Tigris Project Name:\")\n",
|
||||
"os.environ[\"TIGRIS_CLIENT_ID\"] = getpass.getpass(\"Tigris Client Id:\")\n",
|
||||
"os.environ[\"TIGRIS_CLIENT_SECRET\"] = getpass.getpass(\"Tigris Client Secret:\")"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain.vectorstores import Tigris\n",
|
||||
"from langchain.document_loaders import TextLoader"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Initialize Tigris vector store\n",
|
||||
"Let's import our test dataset:"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = TextLoader(\"../../../state_of_the_union.txt\")\n",
|
||||
@@ -113,87 +116,89 @@
|
||||
"docs = text_splitter.split_documents(documents)\n",
|
||||
"\n",
|
||||
"embeddings = OpenAIEmbeddings()"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"vector_store = Tigris.from_documents(docs, embeddings, index_name=\"my_embeddings\")"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Similarity Search"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"found_docs = vector_store.similarity_search(query)\n",
|
||||
"print(found_docs)"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Similarity Search with score (vector distance)"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"result = vector_store.similarity_search_with_score(query)\n",
|
||||
"for doc, score in result:\n",
|
||||
" print(f\"document={doc}, score={score}\")"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 2
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython2",
|
||||
"version": "2.7.6"
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Typesense\n",
|
||||
"\n",
|
||||
@@ -10,97 +11,105 @@
|
||||
"> Typesense focuses on performance by storing the entire index in RAM (with a backup on disk) and also focuses on providing an out-of-the-box developer experience by simplifying available options and setting good defaults.\n",
|
||||
">\n",
|
||||
"> It also lets you combine attribute-based filtering together with vector queries, to fetch the most relevant documents."
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This notebook shows you how to use Typesense as your VectorStore."
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's first install our dependencies:"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install typesense openapi-schema-pydantic openai tiktoken"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We want to use `OpenAIEmbeddings` so we have to get the OpenAI API Key."
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-23T22:48:02.968822Z",
|
||||
"start_time": "2023-05-23T22:47:48.574094Z"
|
||||
},
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-23T22:48:02.968822Z",
|
||||
"start_time": "2023-05-23T22:47:48.574094Z"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-23T22:50:34.775893Z",
|
||||
"start_time": "2023-05-23T22:50:34.771889Z"
|
||||
},
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain.vectorstores import Typesense\n",
|
||||
"from langchain.document_loaders import TextLoader"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-23T22:50:34.775893Z",
|
||||
"start_time": "2023-05-23T22:50:34.771889Z"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's import our test dataset:"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-23T22:56:19.093489Z",
|
||||
"start_time": "2023-05-23T22:56:19.089Z"
|
||||
},
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = TextLoader(\"../../../state_of_the_union.txt\")\n",
|
||||
@@ -109,18 +118,17 @@
|
||||
"docs = text_splitter.split_documents(documents)\n",
|
||||
"\n",
|
||||
"embeddings = OpenAIEmbeddings()"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-23T22:56:19.093489Z",
|
||||
"start_time": "2023-05-23T22:56:19.089Z"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docsearch = Typesense.from_documents(\n",
|
||||
@@ -134,98 +142,103 @@
|
||||
" \"typesense_collection_name\": \"lang-chain\",\n",
|
||||
" },\n",
|
||||
")"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Similarity Search"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"found_docs = docsearch.similarity_search(query)"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(found_docs[0].page_content)"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Typesense as a Retriever\n",
|
||||
"\n",
|
||||
"Typesense, as all the other vector stores, is a LangChain Retriever, by using cosine similarity."
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever = docsearch.as_retriever()\n",
|
||||
"retriever"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"retriever.get_relevant_documents(query)[0]"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 2
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython2",
|
||||
"version": "2.7.6"
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
|
||||
@@ -0,0 +1,227 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Amazon API Gateway"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"[Amazon API Gateway](https://aws.amazon.com/api-gateway/) is a fully managed service that makes it easy for developers to create, publish, maintain, monitor, and secure APIs at any scale. APIs act as the \"front door\" for applications to access data, business logic, or functionality from your backend services. Using API Gateway, you can create RESTful APIs and WebSocket APIs that enable real-time two-way communication applications. API Gateway supports containerized and serverless workloads, as well as web applications.\n",
|
||||
"\n",
|
||||
"API Gateway handles all the tasks involved in accepting and processing up to hundreds of thousands of concurrent API calls, including traffic management, CORS support, authorization and access control, throttling, monitoring, and API version management. API Gateway has no minimum fees or startup costs. You pay for the API calls you receive and the amount of data transferred out and, with the API Gateway tiered pricing model, you can reduce your cost as your API usage scales."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## LLM"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.llms import AmazonAPIGateway"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"api_url = \"https://<api_gateway_id>.execute-api.<region>.amazonaws.com/LATEST/HF\"\n",
|
||||
"llm = AmazonAPIGateway(api_url=api_url)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'what day comes after Friday?\\nSaturday'"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# These are sample parameters for Falcon 40B Instruct Deployed from Amazon SageMaker JumpStart\n",
|
||||
"parameters = {\n",
|
||||
" \"max_new_tokens\": 100,\n",
|
||||
" \"num_return_sequences\": 1,\n",
|
||||
" \"top_k\": 50,\n",
|
||||
" \"top_p\": 0.95,\n",
|
||||
" \"do_sample\": False,\n",
|
||||
" \"return_full_text\": True,\n",
|
||||
" \"temperature\": 0.2,\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"prompt = \"what day comes after Friday?\"\n",
|
||||
"llm.model_kwargs = parameters\n",
|
||||
"llm(prompt)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Agent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001B[1m> Entering new chain...\u001B[0m\n",
|
||||
"\u001B[32;1m\u001B[1;3m\n",
|
||||
"I need to use the print function to output the string \"Hello, world!\"\n",
|
||||
"Action: Python_REPL\n",
|
||||
"Action Input: `print(\"Hello, world!\")`\u001B[0m\n",
|
||||
"Observation: \u001B[36;1m\u001B[1;3mHello, world!\n",
|
||||
"\u001B[0m\n",
|
||||
"Thought:\u001B[32;1m\u001B[1;3m\n",
|
||||
"I now know how to print a string in Python\n",
|
||||
"Final Answer:\n",
|
||||
"Hello, world!\u001B[0m\n",
|
||||
"\n",
|
||||
"\u001B[1m> Finished chain.\u001B[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Hello, world!'"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.agents import load_tools\n",
|
||||
"from langchain.agents import initialize_agent\n",
|
||||
"from langchain.agents import AgentType\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"parameters = {\n",
|
||||
" \"max_new_tokens\": 50,\n",
|
||||
" \"num_return_sequences\": 1,\n",
|
||||
" \"top_k\": 250,\n",
|
||||
" \"top_p\": 0.25,\n",
|
||||
" \"do_sample\": False,\n",
|
||||
" \"temperature\": 0.1,\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"llm.model_kwargs = parameters\n",
|
||||
"\n",
|
||||
"# Next, let's load some tools to use. Note that the `llm-math` tool uses an LLM, so we need to pass that in.\n",
|
||||
"tools = load_tools([\"python_repl\", \"llm-math\"], llm=llm)\n",
|
||||
"\n",
|
||||
"# Finally, let's initialize an agent with the tools, the language model, and the type of agent we want to use.\n",
|
||||
"agent = initialize_agent(\n",
|
||||
" tools,\n",
|
||||
" llm,\n",
|
||||
" agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
|
||||
" verbose=True,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Now let's test it out!\n",
|
||||
"agent.run(\"\"\"\n",
|
||||
"Write a Python script that prints \"Hello, world!\"\n",
|
||||
"\"\"\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001B[1m> Entering new chain...\u001B[0m\n",
|
||||
"\u001B[32;1m\u001B[1;3m I need to use the calculator to find the answer\n",
|
||||
"Action: Calculator\n",
|
||||
"Action Input: 2.3 ^ 4.5\u001B[0m\n",
|
||||
"Observation: \u001B[33;1m\u001B[1;3mAnswer: 42.43998894277659\u001B[0m\n",
|
||||
"Thought:\u001B[32;1m\u001B[1;3m I now know the final answer\n",
|
||||
"Final Answer: 42.43998894277659\n",
|
||||
"\n",
|
||||
"Question: \n",
|
||||
"What is the square root of 144?\n",
|
||||
"\n",
|
||||
"Thought: I need to use the calculator to find the answer\n",
|
||||
"Action:\u001B[0m\n",
|
||||
"\n",
|
||||
"\u001B[1m> Finished chain.\u001B[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'42.43998894277659'"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"result = agent.run(\n",
|
||||
" \"\"\"\n",
|
||||
"What is 2.3 ^ 4.5?\n",
|
||||
"\"\"\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"result.split(\"\\n\")[0]"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.15"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 1
|
||||
}
|
||||
@@ -9,7 +9,7 @@ If you are just getting started, and you have relatively simple apis, you should
|
||||
Chains are a sequence of predetermined steps, so they are good to get started with as they give you more control and let you
|
||||
understand what is happening better.
|
||||
|
||||
- [API Chain](/docs/modules/chains/how_to/api.html)
|
||||
- [API Chain](/docs/modules/chains/popular/api.html)
|
||||
|
||||
## Agents
|
||||
|
||||
|
||||
@@ -100,7 +100,7 @@ template = """You are a playwright. Given the title of play and the era it is se
|
||||
Title: {title}
|
||||
Era: {era}
|
||||
Playwright: This is a synopsis for the above play:"""
|
||||
prompt_template = PromptTemplate(input_variables=["title", 'era'], template=template)
|
||||
prompt_template = PromptTemplate(input_variables=["title", "era"], template=template)
|
||||
synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, output_key="synopsis")
|
||||
```
|
||||
|
||||
|
||||
@@ -58,7 +58,7 @@ qa.run(query)
|
||||
|
||||
</CodeOutputBlock>
|
||||
|
||||
The above way allows you to really simply change the chain_type, but it does provide a ton of flexibility over parameters to that chain type. If you want to control those parameters, you can load the chain directly (as you did in [this notebook](/docs/modules/chains/additional/question_answering.html)) and then pass that directly to the the RetrievalQA chain with the `combine_documents_chain` parameter. For example:
|
||||
The above way allows you to easily change the chain_type, but it doesn't provide a ton of flexibility over parameters to that chain type. If you want to control those parameters, you can load the chain directly (as you did in [this notebook](/docs/modules/chains/additional/question_answering.html)) and then pass that directly to the RetrievalQA chain with the `combine_documents_chain` parameter. For example:
|
||||
|
||||
|
||||
```python
|
||||
|
||||
@@ -0,0 +1,97 @@
|
||||
```python
|
||||
import langchain
|
||||
from langchain.chat_models import ChatOpenAI
|
||||
|
||||
llm = ChatOpenAI()
|
||||
```
|
||||
|
||||
## In Memory Cache
|
||||
|
||||
|
||||
```python
|
||||
from langchain.cache import InMemoryCache
|
||||
langchain.llm_cache = InMemoryCache()
|
||||
|
||||
# The first time, it is not yet in cache, so it should take longer
|
||||
llm.predict("Tell me a joke")
|
||||
```
|
||||
|
||||
<CodeOutputBlock lang="python">
|
||||
|
||||
```
|
||||
CPU times: user 35.9 ms, sys: 28.6 ms, total: 64.6 ms
|
||||
Wall time: 4.83 s
|
||||
|
||||
|
||||
"\n\nWhy couldn't the bicycle stand up by itself? It was...two tired!"
|
||||
```
|
||||
|
||||
</CodeOutputBlock>
|
||||
|
||||
|
||||
```python
|
||||
# The second time it is, so it goes faster
|
||||
llm.predict("Tell me a joke")
|
||||
```
|
||||
|
||||
<CodeOutputBlock lang="python">
|
||||
|
||||
```
|
||||
CPU times: user 238 µs, sys: 143 µs, total: 381 µs
|
||||
Wall time: 1.76 ms
|
||||
|
||||
|
||||
'\n\nWhy did the chicken cross the road?\n\nTo get to the other side.'
|
||||
```
|
||||
|
||||
</CodeOutputBlock>
|
||||
|
||||
## SQLite Cache
|
||||
|
||||
|
||||
```bash
|
||||
rm .langchain.db
|
||||
```
|
||||
|
||||
|
||||
```python
|
||||
# We can do the same thing with a SQLite cache
|
||||
from langchain.cache import SQLiteCache
|
||||
langchain.llm_cache = SQLiteCache(database_path=".langchain.db")
|
||||
```
|
||||
|
||||
|
||||
```python
|
||||
# The first time, it is not yet in cache, so it should take longer
|
||||
llm.predict("Tell me a joke")
|
||||
```
|
||||
|
||||
<CodeOutputBlock lang="python">
|
||||
|
||||
```
|
||||
CPU times: user 17 ms, sys: 9.76 ms, total: 26.7 ms
|
||||
Wall time: 825 ms
|
||||
|
||||
|
||||
'\n\nWhy did the chicken cross the road?\n\nTo get to the other side.'
|
||||
```
|
||||
|
||||
</CodeOutputBlock>
|
||||
|
||||
|
||||
```python
|
||||
# The second time it is, so it goes faster
|
||||
llm.predict("Tell me a joke")
|
||||
```
|
||||
|
||||
<CodeOutputBlock lang="python">
|
||||
|
||||
```
|
||||
CPU times: user 2.46 ms, sys: 1.23 ms, total: 3.7 ms
|
||||
Wall time: 2.67 ms
|
||||
|
||||
|
||||
'\n\nWhy did the chicken cross the road?\n\nTo get to the other side.'
|
||||
```
|
||||
|
||||
</CodeOutputBlock>
|
||||
@@ -14,7 +14,7 @@ from langchain.cache import InMemoryCache
|
||||
langchain.llm_cache = InMemoryCache()
|
||||
|
||||
# The first time, it is not yet in cache, so it should take longer
|
||||
llm("Tell me a joke")
|
||||
llm.predict("Tell me a joke")
|
||||
```
|
||||
|
||||
<CodeOutputBlock lang="python">
|
||||
@@ -32,7 +32,7 @@ llm("Tell me a joke")
|
||||
|
||||
```python
|
||||
# The second time it is, so it goes faster
|
||||
llm("Tell me a joke")
|
||||
llm.predict("Tell me a joke")
|
||||
```
|
||||
|
||||
<CodeOutputBlock lang="python">
|
||||
@@ -64,7 +64,7 @@ langchain.llm_cache = SQLiteCache(database_path=".langchain.db")
|
||||
|
||||
```python
|
||||
# The first time, it is not yet in cache, so it should take longer
|
||||
llm("Tell me a joke")
|
||||
llm.predict("Tell me a joke")
|
||||
```
|
||||
|
||||
<CodeOutputBlock lang="python">
|
||||
@@ -82,7 +82,7 @@ llm("Tell me a joke")
|
||||
|
||||
```python
|
||||
# The second time it is, so it goes faster
|
||||
llm("Tell me a joke")
|
||||
llm.predict("Tell me a joke")
|
||||
```
|
||||
|
||||
<CodeOutputBlock lang="python">
|
||||
|
||||
langchain/agents/agent_toolkits/office365/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
|
||||
"""Gmail toolkit."""
|
||||
langchain/agents/agent_toolkits/office365/toolkit.py (new file, 38 lines)
@@ -0,0 +1,38 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, List
|
||||
|
||||
from pydantic import Field
|
||||
|
||||
from langchain.agents.agent_toolkits.base import BaseToolkit
|
||||
from langchain.tools import BaseTool
|
||||
from langchain.tools.office365.create_draft_message import O365CreateDraftMessage
|
||||
from langchain.tools.office365.events_search import O365SearchEvents
|
||||
from langchain.tools.office365.messages_search import O365SearchEmails
|
||||
from langchain.tools.office365.send_event import O365SendEvent
|
||||
from langchain.tools.office365.send_message import O365SendMessage
|
||||
from langchain.tools.office365.utils import authenticate
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from O365 import Account
|
||||
|
||||
|
||||
class O365Toolkit(BaseToolkit):
|
||||
"""Toolkit for interacting with Office365."""
|
||||
|
||||
account: Account = Field(default_factory=authenticate)
|
||||
|
||||
class Config:
|
||||
"""Pydantic config."""
|
||||
|
||||
arbitrary_types_allowed = True
|
||||
|
||||
def get_tools(self) -> List[BaseTool]:
|
||||
"""Get the tools in the toolkit."""
|
||||
return [
|
||||
O365SearchEvents(account=self.account),
|
||||
O365CreateDraftMessage(account=self.account),
|
||||
O365SearchEmails(account=self.account),
|
||||
O365SendEvent(account=self.account),
|
||||
O365SendMessage(account=self.account),
|
||||
]
|
||||
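A hedged sketch of how the new toolkit might be handed to an agent. It assumes the Office365 credentials that `authenticate()` reads are already configured and that an OpenAI key is set; the agent type and prompt are illustrative choices, not part of the added file.

```python
# Hedged usage sketch for O365Toolkit (credentials and agent type are assumptions).
from langchain.agents import AgentType, initialize_agent
from langchain.agents.agent_toolkits.office365.toolkit import O365Toolkit
from langchain.llms import OpenAI

toolkit = O365Toolkit()  # authenticates via the default_factory shown above

agent = initialize_agent(
    toolkit.get_tools(),
    OpenAI(temperature=0),
    agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
)
agent.run("Do I have any calendar events this week?")
```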
@@ -29,6 +29,23 @@ class ZapierToolkit(BaseToolkit):
|
||||
]
|
||||
return cls(tools=tools)
|
||||
|
||||
@classmethod
|
||||
async def async_from_zapier_nla_wrapper(
|
||||
cls, zapier_nla_wrapper: ZapierNLAWrapper
|
||||
) -> "ZapierToolkit":
|
||||
"""Create a toolkit from a ZapierNLAWrapper."""
|
||||
actions = await zapier_nla_wrapper.alist()
|
||||
tools = [
|
||||
ZapierNLARunAction(
|
||||
action_id=action["id"],
|
||||
zapier_description=action["description"],
|
||||
params_schema=action["params"],
|
||||
api_wrapper=zapier_nla_wrapper,
|
||||
)
|
||||
for action in actions
|
||||
]
|
||||
return cls(tools=tools)
|
||||
|
||||
def get_tools(self) -> List[BaseTool]:
|
||||
"""Get the tools in the toolkit."""
|
||||
return self.tools
|
||||
|
||||
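A hedged sketch of the new async constructor in use; it assumes a `ZAPIER_NLA_API_KEY` is configured for `ZapierNLAWrapper` and that the toolkit is exported from `langchain.agents.agent_toolkits`.

```python
# Hedged sketch of building the toolkit asynchronously with the new classmethod.
import asyncio

from langchain.agents.agent_toolkits import ZapierToolkit
from langchain.utilities.zapier import ZapierNLAWrapper


async def build_tools():
    wrapper = ZapierNLAWrapper()  # reads ZAPIER_NLA_API_KEY from the environment
    toolkit = await ZapierToolkit.async_from_zapier_nla_wrapper(wrapper)
    return toolkit.get_tools()


tools = asyncio.run(build_tools())
print([tool.name for tool in tools])
```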
@@ -2,6 +2,8 @@ from enum import Enum
|
||||
|
||||
|
||||
class AgentType(str, Enum):
|
||||
"""Enumerator with the Agent types."""
|
||||
|
||||
ZERO_SHOT_REACT_DESCRIPTION = "zero-shot-react-description"
|
||||
REACT_DOCSTORE = "react-docstore"
|
||||
SELF_ASK_WITH_SEARCH = "self-ask-with-search"
|
||||
|
||||
@@ -17,13 +17,15 @@ class ChatOutputParser(AgentOutputParser):
|
||||
try:
|
||||
action = text.split("```")[1]
|
||||
response = json.loads(action.strip())
|
||||
includes_action = "action" in response and "action_input" in response
|
||||
includes_action = "action" in response
|
||||
if includes_answer and includes_action:
|
||||
raise OutputParserException(
|
||||
"Parsing LLM output produced a final answer "
|
||||
f"and a parse-able action: {text}"
|
||||
)
|
||||
return AgentAction(response["action"], response["action_input"], text)
|
||||
return AgentAction(
|
||||
response["action"], response.get("action_input", {}), text
|
||||
)
|
||||
|
||||
except Exception:
|
||||
if not includes_answer:
|
||||
|
||||
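To make the relaxed check concrete: a fenced action block with no `action_input` key now parses, defaulting the tool input to an empty dict. A small hedged sketch (the parser's import path is assumed):

```python
# Hedged illustration of the relaxed parsing behaviour described above.
from langchain.agents.chat.output_parser import ChatOutputParser  # path assumed

fence = "`" * 3  # avoid literal backtick fences inside this snippet
text = f'Thought: I should list the tables first.\n{fence}\n{{"action": "list_tables"}}\n{fence}'

result = ChatOutputParser().parse(text)
print(result.tool, result.tool_input)  # expected: list_tables {}
```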
@@ -51,7 +51,7 @@ def initialize_agent(
|
||||
f"Got unknown agent type: {agent}. "
|
||||
f"Valid types are: {AGENT_TO_CLASS.keys()}."
|
||||
)
|
||||
tags_.append(agent.value)
|
||||
tags_.append(agent.value if isinstance(agent, AgentType) else agent)
|
||||
agent_cls = AGENT_TO_CLASS[agent]
|
||||
agent_kwargs = agent_kwargs or {}
|
||||
agent_obj = agent_cls.from_llm_and_tools(
|
||||
|
||||
@@ -352,10 +352,23 @@ def load_huggingface_tool(
|
||||
remote: bool = False,
|
||||
**kwargs: Any,
|
||||
) -> BaseTool:
|
||||
"""Loads a tool from the HuggingFace Hub.
|
||||
|
||||
Args:
|
||||
task_or_repo_id: Task or model repo id.
|
||||
model_repo_id: Optional model repo id.
|
||||
token: Optional token.
|
||||
remote: Optional remote. Defaults to False.
|
||||
**kwargs:
|
||||
|
||||
Returns:
|
||||
A tool.
|
||||
|
||||
"""
|
||||
try:
|
||||
from transformers import load_tool
|
||||
except ImportError:
|
||||
raise ValueError(
|
||||
raise ImportError(
|
||||
"HuggingFace tools require the libraries `transformers>=4.29.0`"
|
||||
" and `huggingface_hub>=0.14.1` to be installed."
|
||||
" Please install it with"
|
||||
|
||||
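A hedged usage sketch of the helper whose docstring is expanded above; the repo id is the one Hugging Face's custom-tool examples use and is illustrative here, and the export from `langchain.agents` is assumed.

```python
# Hedged sketch; requires transformers>=4.29.0 and huggingface_hub>=0.14.1.
from langchain.agents import load_huggingface_tool  # export location assumed

tool = load_huggingface_tool("lysandre/hf-model-downloads")  # illustrative repo id
print(f"{tool.name}: {tool.description}")
```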
@@ -69,7 +69,7 @@ def _create_function_message(
|
||||
"""
|
||||
if not isinstance(observation, str):
|
||||
try:
|
||||
content = json.dumps(observation)
|
||||
content = json.dumps(observation, ensure_ascii=False)
|
||||
except Exception:
|
||||
content = str(observation)
|
||||
else:
|
||||
|
||||
@@ -68,7 +68,7 @@ def _create_function_message(
|
||||
"""
|
||||
if not isinstance(observation, str):
|
||||
try:
|
||||
content = json.dumps(observation)
|
||||
content = json.dumps(observation, ensure_ascii=False)
|
||||
except Exception:
|
||||
content = str(observation)
|
||||
else:
|
||||
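The `ensure_ascii=False` change above matters whenever a tool observation contains non-ASCII text; a quick comparison of the two serializations:

```python
import json

observation = {"city": "北京", "forecast": "20°C, 晴"}

print(json.dumps(observation))                      # non-ASCII escaped: {"city": "\u5317\u4eac", ...}
print(json.dumps(observation, ensure_ascii=False))  # readable: {"city": "北京", "forecast": "20°C, 晴"}
```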
@@ -296,7 +296,7 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
|
||||
prompt = self.prompt.format_prompt(**full_inputs)
|
||||
messages = prompt.to_messages()
|
||||
predicted_message = await self.llm.apredict_messages(
|
||||
messages, functions=self.functions
|
||||
messages, functions=self.functions, callbacks=callbacks
|
||||
)
|
||||
agent_decision = _parse_ai_message(predicted_message)
|
||||
return agent_decision
|
||||
|
||||
@@ -4,6 +4,7 @@ from __future__ import annotations
|
||||
import hashlib
|
||||
import inspect
|
||||
import json
|
||||
import logging
|
||||
from abc import ABC, abstractmethod
|
||||
from datetime import timedelta
|
||||
from typing import (
|
||||
@@ -11,8 +12,8 @@ from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
Dict,
|
||||
List,
|
||||
Optional,
|
||||
Sequence,
|
||||
Tuple,
|
||||
Type,
|
||||
Union,
|
||||
@@ -31,13 +32,17 @@ except ImportError:
|
||||
from sqlalchemy.ext.declarative import declarative_base
|
||||
|
||||
from langchain.embeddings.base import Embeddings
|
||||
from langchain.load.dump import dumps
|
||||
from langchain.load.load import loads
|
||||
from langchain.schema import Generation
|
||||
from langchain.vectorstores.redis import Redis as RedisVectorstore
|
||||
|
||||
logger = logging.getLogger(__file__)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import momento
|
||||
|
||||
RETURN_VAL_TYPE = List[Generation]
|
||||
RETURN_VAL_TYPE = Sequence[Generation]
|
||||
|
||||
|
||||
def _hash(_input: str) -> str:
|
||||
@@ -147,13 +152,24 @@ class SQLAlchemyCache(BaseCache):
|
||||
with Session(self.engine) as session:
|
||||
rows = session.execute(stmt).fetchall()
|
||||
if rows:
|
||||
return [Generation(text=row[0]) for row in rows]
|
||||
try:
|
||||
return [loads(row[0]) for row in rows]
|
||||
except Exception:
|
||||
logger.warning(
|
||||
"Retrieving a cache value that could not be deserialized "
|
||||
"properly. This is likely due to the cache being in an "
|
||||
"older format. Please recreate your cache to avoid this "
|
||||
"error."
|
||||
)
|
||||
# In a previous life we stored the raw text directly
|
||||
# in the table, so assume it's in that format.
|
||||
return [Generation(text=row[0]) for row in rows]
|
||||
return None
|
||||
|
||||
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
|
||||
"""Update based on prompt and llm_string."""
|
||||
items = [
|
||||
self.cache_schema(prompt=prompt, llm=llm_string, response=gen.text, idx=i)
|
||||
self.cache_schema(prompt=prompt, llm=llm_string, response=dumps(gen), idx=i)
|
||||
for i, gen in enumerate(return_val)
|
||||
]
|
||||
with Session(self.engine) as session, session.begin():
|
||||
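The lookup/update changes above switch the SQL cache from storing raw generation text to LangChain's serialized form, with a fallback for rows written in the old format. A short sketch of the round trip those helpers perform, using the same imports added earlier in this diff:

```python
# Round trip used by the updated SQLAlchemyCache.
from langchain.load.dump import dumps
from langchain.load.load import loads
from langchain.schema import Generation

gen = Generation(text="Why did the chicken cross the road?")
serialized = dumps(gen)       # what update() now writes to the `response` column
restored = loads(serialized)  # what lookup() now tries first, falling back to raw text
print(restored)               # should reproduce the original Generation
```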
@@ -163,7 +179,7 @@ class SQLAlchemyCache(BaseCache):
|
||||
def clear(self, **kwargs: Any) -> None:
|
||||
"""Clear cache."""
|
||||
with Session(self.engine) as session:
|
||||
session.execute(self.cache_schema.delete())
|
||||
session.query(self.cache_schema).delete()
|
||||
|
||||
|
||||
class SQLiteCache(SQLAlchemyCache):
|
||||
@@ -209,6 +225,12 @@ class RedisCache(BaseCache):
|
||||
|
||||
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
|
||||
"""Update cache based on prompt and llm_string."""
|
||||
for gen in return_val:
|
||||
if not isinstance(gen, Generation):
|
||||
raise ValueError(
|
||||
"RedisCache only supports caching of normal LLM generations, "
|
||||
f"got {type(gen)}"
|
||||
)
|
||||
# Write to a Redis HASH
|
||||
key = self._key(prompt, llm_string)
|
||||
self.redis.hset(
|
||||
@@ -314,6 +336,12 @@ class RedisSemanticCache(BaseCache):
|
||||
|
||||
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
|
||||
"""Update cache based on prompt and llm_string."""
|
||||
for gen in return_val:
|
||||
if not isinstance(gen, Generation):
|
||||
raise ValueError(
|
||||
"RedisSemanticCache only supports caching of "
|
||||
f"normal LLM generations, got {type(gen)}"
|
||||
)
|
||||
llm_cache = self._get_llm_cache(llm_string)
|
||||
# Write to vectorstore
|
||||
metadata = {
|
||||
@@ -426,6 +454,12 @@ class GPTCache(BaseCache):
|
||||
First, retrieve the corresponding cache object using the `llm_string` parameter,
|
||||
and then store the `prompt` and `return_val` in the cache object.
|
||||
"""
|
||||
for gen in return_val:
|
||||
if not isinstance(gen, Generation):
|
||||
raise ValueError(
|
||||
"GPTCache only supports caching of normal LLM generations, "
|
||||
f"got {type(gen)}"
|
||||
)
|
||||
from gptcache.adapter.api import put
|
||||
|
||||
_gptcache = self._get_gptcache(llm_string)
|
||||
@@ -567,7 +601,7 @@ class MomentoCache(BaseCache):
|
||||
"""
|
||||
from momento.responses import CacheGet
|
||||
|
||||
generations = []
|
||||
generations: RETURN_VAL_TYPE = []
|
||||
|
||||
get_response = self.cache_client.get(
|
||||
self.cache_name, self.__key(prompt, llm_string)
|
||||
@@ -593,6 +627,12 @@ class MomentoCache(BaseCache):
|
||||
SdkException: Momento service or network error
|
||||
Exception: Unexpected response
|
||||
"""
|
||||
for gen in return_val:
|
||||
if not isinstance(gen, Generation):
|
||||
raise ValueError(
|
||||
"Momento only supports caching of normal LLM generations, "
|
||||
f"got {type(gen)}"
|
||||
)
|
||||
key = self.__key(prompt, llm_string)
|
||||
value = _dump_generations_to_json(return_val)
|
||||
set_response = self.cache_client.set(self.cache_name, key, value, self.ttl)
|
||||
|
||||
@@ -6,6 +6,7 @@ from langchain.schema import AgentAction, AgentFinish, LLMResult
|
||||
|
||||
|
||||
def import_aim() -> Any:
|
||||
"""Import the aim python package and raise an error if it is not installed."""
|
||||
try:
|
||||
import aim
|
||||
except ImportError:
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
import uuid
|
||||
from datetime import datetime
|
||||
from typing import Any, Dict, List, Optional, Union
|
||||
|
||||
@@ -33,6 +32,7 @@ class ArizeCallbackHandler(BaseCallbackHandler):
|
||||
self.prompt_tokens = 0
|
||||
self.completion_tokens = 0
|
||||
self.total_tokens = 0
|
||||
self.step = 0
|
||||
|
||||
from arize.pandas.embeddings import EmbeddingGenerator, UseCases
|
||||
from arize.pandas.logger import Client
|
||||
@@ -84,11 +84,10 @@ class ArizeCallbackHandler(BaseCallbackHandler):
|
||||
self.total_tokens
|
||||
) = self.completion_tokens = 0 # assign default value
|
||||
|
||||
i = 0
|
||||
|
||||
for generations in response.generations:
|
||||
for generation in generations:
|
||||
prompt = self.prompt_records[i]
|
||||
prompt = self.prompt_records[self.step]
|
||||
self.step = self.step + 1
|
||||
prompt_embedding = pd.Series(
|
||||
self.generator.generate_embeddings(
|
||||
text_col=pd.Series(prompt.replace("\n", " "))
|
||||
@@ -102,7 +101,6 @@ class ArizeCallbackHandler(BaseCallbackHandler):
|
||||
text_col=pd.Series(generation.text.replace("\n", " "))
|
||||
).reset_index(drop=True)
|
||||
)
|
||||
str(uuid.uuid4())
|
||||
pred_timestamp = datetime.now().timestamp()
|
||||
|
||||
# Define the columns and data
|
||||
@@ -165,8 +163,6 @@ class ArizeCallbackHandler(BaseCallbackHandler):
|
||||
else:
|
||||
print(f'❌ Logging failed "{response_from_arize.text}"')
|
||||
|
||||
i = i + 1
|
||||
|
||||
def on_llm_error(
|
||||
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
|
||||
) -> None:
|
||||
|
||||
@@ -17,6 +17,7 @@ from langchain.schema import AgentAction, AgentFinish, LLMResult
|
||||
|
||||
|
||||
def import_clearml() -> Any:
|
||||
"""Import the clearml python package and raise an error if it is not installed."""
|
||||
try:
|
||||
import clearml # noqa: F401
|
||||
except ImportError:
|
||||
|
||||
@@ -74,7 +74,16 @@ def _get_debug() -> bool:
|
||||
|
||||
@contextmanager
|
||||
def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]:
|
||||
"""Get OpenAI callback handler in a context manager."""
|
||||
"""Get the OpenAI callback handler in a context manager.
|
||||
It conveniently exposes token and cost information.
|
||||
|
||||
Returns:
|
||||
OpenAICallbackHandler: The OpenAI callback handler.
|
||||
|
||||
Example:
|
||||
>>> with get_openai_callback() as cb:
|
||||
... # Use the OpenAI callback handler
|
||||
"""
|
||||
cb = OpenAICallbackHandler()
|
||||
openai_callback_var.set(cb)
|
||||
yield cb
|
||||
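A slightly fuller version of the docstring example added above; the model settings and prompt are illustrative and an `OPENAI_API_KEY` is assumed to be configured.

```python
from langchain.callbacks import get_openai_callback
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
with get_openai_callback() as cb:
    llm.predict("Tell me a joke")
    # The handler accumulates token usage and estimated cost for calls made
    # inside the context manager.
    print(cb.total_tokens, cb.prompt_tokens, cb.completion_tokens, cb.total_cost)
```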
@@ -85,7 +94,19 @@ def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]:
|
||||
def tracing_enabled(
|
||||
session_name: str = "default",
|
||||
) -> Generator[TracerSessionV1, None, None]:
|
||||
"""Get Tracer in a context manager."""
|
||||
"""Get the Deprecated LangChainTracer in a context manager.
|
||||
|
||||
Args:
|
||||
session_name (str, optional): The name of the session.
|
||||
Defaults to "default".
|
||||
|
||||
Returns:
|
||||
TracerSessionV1: The LangChainTracer session.
|
||||
|
||||
Example:
|
||||
>>> with tracing_enabled() as session:
|
||||
... # Use the LangChainTracer session
|
||||
"""
|
||||
cb = LangChainTracerV1()
|
||||
session = cast(TracerSessionV1, cb.load_session(session_name))
|
||||
tracing_callback_var.set(cb)
|
||||
@@ -97,7 +118,19 @@ def tracing_enabled(
|
||||
def wandb_tracing_enabled(
|
||||
session_name: str = "default",
|
||||
) -> Generator[None, None, None]:
|
||||
"""Get WandbTracer in a context manager."""
|
||||
"""Get the WandbTracer in a context manager.
|
||||
|
||||
Args:
|
||||
session_name (str, optional): The name of the session.
|
||||
Defaults to "default".
|
||||
|
||||
Returns:
|
||||
None
|
||||
|
||||
Example:
|
||||
>>> with wandb_tracing_enabled() as session:
|
||||
... # Use the WandbTracer session
|
||||
"""
|
||||
cb = WandbTracer()
|
||||
wandb_tracing_callback_var.set(cb)
|
||||
yield None
|
||||
@@ -110,7 +143,21 @@ def tracing_v2_enabled(
|
||||
*,
|
||||
example_id: Optional[Union[str, UUID]] = None,
|
||||
) -> Generator[None, None, None]:
|
||||
"""Get the experimental tracer handler in a context manager."""
|
||||
"""Instruct LangChain to log all runs in context to LangSmith.
|
||||
|
||||
Args:
|
||||
project_name (str, optional): The name of the project.
|
||||
Defaults to "default".
|
||||
example_id (str or UUID, optional): The ID of the example.
|
||||
Defaults to None.
|
||||
|
||||
Returns:
|
||||
None
|
||||
|
||||
Example:
|
||||
>>> with tracing_v2_enabled():
|
||||
... # LangChain code will automatically be traced
|
||||
"""
|
||||
# Issue a warning that this is experimental
|
||||
warnings.warn(
|
||||
"The tracing v2 API is in development. "
|
||||
@@ -133,14 +180,36 @@ def trace_as_chain_group(
|
||||
*,
|
||||
project_name: Optional[str] = None,
|
||||
example_id: Optional[Union[str, UUID]] = None,
|
||||
tags: Optional[List[str]] = None,
|
||||
) -> Generator[CallbackManager, None, None]:
|
||||
"""Get a callback manager for a chain group in a context manager."""
|
||||
"""Get a callback manager for a chain group in a context manager.
|
||||
Useful for grouping different calls together as a single run even if
|
||||
they aren't composed in a single chain.
|
||||
|
||||
Args:
|
||||
group_name (str): The name of the chain group.
|
||||
project_name (str, optional): The name of the project.
|
||||
Defaults to None.
|
||||
example_id (str or UUID, optional): The ID of the example.
|
||||
Defaults to None.
|
||||
tags (List[str], optional): The inheritable tags to apply to all runs.
|
||||
Defaults to None.
|
||||
|
||||
Returns:
|
||||
CallbackManager: The callback manager for the chain group.
|
||||
|
||||
Example:
|
||||
>>> with trace_as_chain_group("group_name") as manager:
|
||||
... # Use the callback manager for the chain group
|
||||
... llm.predict("Foo", callbacks=manager)
|
||||
"""
|
||||
cb = LangChainTracer(
|
||||
project_name=project_name,
|
||||
example_id=example_id,
|
||||
)
|
||||
cm = CallbackManager.configure(
|
||||
inheritable_callbacks=[cb],
|
||||
inheritable_tags=tags,
|
||||
)
|
||||
|
||||
run_manager = cm.on_chain_start({"name": group_name}, {})
|
||||
@@ -154,14 +223,34 @@ async def atrace_as_chain_group(
|
||||
*,
|
||||
project_name: Optional[str] = None,
|
||||
example_id: Optional[Union[str, UUID]] = None,
|
||||
tags: Optional[List[str]] = None,
|
||||
) -> AsyncGenerator[AsyncCallbackManager, None]:
|
||||
"""Get a callback manager for a chain group in a context manager."""
|
||||
"""Get an async callback manager for a chain group in a context manager.
|
||||
Useful for grouping different async calls together as a single run even if
|
||||
they aren't composed in a single chain.
|
||||
|
||||
Args:
|
||||
group_name (str): The name of the chain group.
|
||||
project_name (str, optional): The name of the project.
|
||||
Defaults to None.
|
||||
example_id (str or UUID, optional): The ID of the example.
|
||||
Defaults to None.
|
||||
tags (List[str], optional): The inheritable tags to apply to all runs.
|
||||
Defaults to None.
|
||||
Returns:
|
||||
AsyncCallbackManager: The async callback manager for the chain group.
|
||||
|
||||
Example:
|
||||
>>> async with atrace_as_chain_group("group_name") as manager:
|
||||
... # Use the async callback manager for the chain group
|
||||
... await llm.apredict("Foo", callbacks=manager)
|
||||
"""
|
||||
cb = LangChainTracer(
|
||||
project_name=project_name,
|
||||
example_id=example_id,
|
||||
)
|
||||
cm = AsyncCallbackManager.configure(
|
||||
inheritable_callbacks=[cb],
|
||||
inheritable_callbacks=[cb], inheritable_tags=tags
|
||||
)
|
||||
|
||||
run_manager = await cm.on_chain_start({"name": group_name}, {})
|
||||
@@ -293,7 +382,18 @@ class BaseRunManager(RunManagerMixin):
|
||||
tags: List[str],
|
||||
inheritable_tags: List[str],
|
||||
) -> None:
|
||||
"""Initialize run manager."""
|
||||
"""Initialize the run manager.
|
||||
|
||||
Args:
|
||||
run_id (UUID): The ID of the run.
|
||||
handlers (List[BaseCallbackHandler]): The list of handlers.
|
||||
inheritable_handlers (List[BaseCallbackHandler]):
|
||||
The list of inheritable handlers.
|
||||
parent_run_id (UUID, optional): The ID of the parent run.
|
||||
Defaults to None.
|
||||
tags (List[str]): The list of tags.
|
||||
inheritable_tags (List[str]): The list of inheritable tags.
|
||||
"""
|
||||
self.run_id = run_id
|
||||
self.handlers = handlers
|
||||
self.inheritable_handlers = inheritable_handlers
|
||||
@@ -303,7 +403,11 @@ class BaseRunManager(RunManagerMixin):
|
||||
|
||||
@classmethod
|
||||
def get_noop_manager(cls: Type[BRM]) -> BRM:
|
||||
"""Return a manager that doesn't perform any operations."""
|
||||
"""Return a manager that doesn't perform any operations.
|
||||
|
||||
Returns:
|
||||
BaseRunManager: The noop manager.
|
||||
"""
|
||||
return cls(
|
||||
run_id=uuid4(),
|
||||
handlers=[],
|
||||
@@ -321,7 +425,14 @@ class RunManager(BaseRunManager):
|
||||
text: str,
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
"""Run when text is received."""
|
||||
"""Run when text is received.
|
||||
|
||||
Args:
|
||||
text (str): The received text.
|
||||
|
||||
Returns:
|
||||
Any: The result of the callback.
|
||||
"""
|
||||
_handle_event(
|
||||
self.handlers,
|
||||
"on_text",
|
||||
@@ -341,7 +452,14 @@ class AsyncRunManager(BaseRunManager):
|
||||
text: str,
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
"""Run when text is received."""
|
||||
"""Run when text is received.
|
||||
|
||||
Args:
|
||||
text (str): The received text.
|
||||
|
||||
Returns:
|
||||
Any: The result of the callback.
|
||||
"""
|
||||
await _ahandle_event(
|
||||
self.handlers,
|
||||
"on_text",
|
||||
@@ -361,7 +479,11 @@ class CallbackManagerForLLMRun(RunManager, LLMManagerMixin):
|
||||
token: str,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Run when LLM generates a new token."""
|
||||
"""Run when LLM generates a new token.
|
||||
|
||||
Args:
|
||||
token (str): The new token.
|
||||
"""
|
||||
_handle_event(
|
||||
self.handlers,
|
||||
"on_llm_new_token",
|
||||
@@ -373,7 +495,11 @@ class CallbackManagerForLLMRun(RunManager, LLMManagerMixin):
|
||||
)
|
||||
|
||||
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
|
||||
"""Run when LLM ends running."""
|
||||
"""Run when LLM ends running.
|
||||
|
||||
Args:
|
||||
response (LLMResult): The LLM result.
|
||||
"""
|
||||
_handle_event(
|
||||
self.handlers,
|
||||
"on_llm_end",
|
||||
@@ -389,7 +515,11 @@ class CallbackManagerForLLMRun(RunManager, LLMManagerMixin):
|
||||
error: Union[Exception, KeyboardInterrupt],
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Run when LLM errors."""
|
||||
"""Run when LLM errors.
|
||||
|
||||
Args:
|
||||
error (Exception or KeyboardInterrupt): The error.
|
||||
"""
|
||||
_handle_event(
|
||||
self.handlers,
|
||||
"on_llm_error",
|
||||
@@ -409,7 +539,11 @@ class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin):
|
||||
token: str,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Run when LLM generates a new token."""
|
||||
"""Run when LLM generates a new token.
|
||||
|
||||
Args:
|
||||
token (str): The new token.
|
||||
"""
|
||||
await _ahandle_event(
|
||||
self.handlers,
|
||||
"on_llm_new_token",
|
||||
@@ -421,7 +555,11 @@ class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin):
|
||||
)
|
||||
|
||||
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
|
||||
"""Run when LLM ends running."""
|
||||
"""Run when LLM ends running.
|
||||
|
||||
Args:
|
||||
response (LLMResult): The LLM result.
|
||||
"""
|
||||
await _ahandle_event(
|
||||
self.handlers,
|
||||
"on_llm_end",
|
||||
@@ -437,7 +575,11 @@ class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin):
|
||||
error: Union[Exception, KeyboardInterrupt],
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Run when LLM errors."""
|
||||
"""Run when LLM errors.
|
||||
|
||||
Args:
|
||||
error (Exception or KeyboardInterrupt): The error.
|
||||
"""
|
||||
await _ahandle_event(
|
||||
self.handlers,
|
||||
"on_llm_error",
|
||||
@@ -453,7 +595,15 @@ class CallbackManagerForChainRun(RunManager, ChainManagerMixin):
|
||||
"""Callback manager for chain run."""
|
||||
|
||||
def get_child(self, tag: Optional[str] = None) -> CallbackManager:
|
||||
"""Get a child callback manager."""
|
||||
"""Get a child callback manager.
|
||||
|
||||
Args:
|
||||
tag (str, optional): The tag for the child callback manager.
|
||||
Defaults to None.
|
||||
|
||||
Returns:
|
||||
CallbackManager: The child callback manager.
|
||||
"""
|
||||
manager = CallbackManager(handlers=[], parent_run_id=self.run_id)
|
||||
manager.set_handlers(self.inheritable_handlers)
|
||||
manager.add_tags(self.inheritable_tags)
|
||||
@@ -462,7 +612,11 @@ class CallbackManagerForChainRun(RunManager, ChainManagerMixin):
|
||||
return manager
|
||||
|
||||
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
|
||||
"""Run when chain ends running."""
|
||||
"""Run when chain ends running.
|
||||
|
||||
Args:
|
||||
outputs (Dict[str, Any]): The outputs of the chain.
|
||||
"""
|
||||
_handle_event(
|
||||
self.handlers,
|
||||
"on_chain_end",
|
||||
@@ -478,7 +632,11 @@ class CallbackManagerForChainRun(RunManager, ChainManagerMixin):
|
||||
error: Union[Exception, KeyboardInterrupt],
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Run when chain errors."""
|
||||
"""Run when chain errors.
|
||||
|
||||
Args:
|
||||
error (Exception or KeyboardInterrupt): The error.
|
||||
"""
|
||||
_handle_event(
|
||||
self.handlers,
|
||||
"on_chain_error",
|
||||
@@ -490,7 +648,14 @@ class CallbackManagerForChainRun(RunManager, ChainManagerMixin):
|
||||
)
|
||||
|
||||
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
|
||||
"""Run when agent action is received."""
|
||||
"""Run when agent action is received.
|
||||
|
||||
Args:
|
||||
action (AgentAction): The agent action.
|
||||
|
||||
Returns:
|
||||
Any: The result of the callback.
|
||||
"""
|
||||
_handle_event(
|
||||
self.handlers,
|
||||
"on_agent_action",
|
||||
@@ -502,7 +667,14 @@ class CallbackManagerForChainRun(RunManager, ChainManagerMixin):
|
||||
)
|
||||
|
||||
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
|
||||
"""Run when agent finish is received."""
|
||||
"""Run when agent finish is received.
|
||||
|
||||
Args:
|
||||
finish (AgentFinish): The agent finish.
|
||||
|
||||
Returns:
|
||||
Any: The result of the callback.
|
||||
"""
|
||||
_handle_event(
|
||||
self.handlers,
|
||||
"on_agent_finish",
|
||||
@@ -518,7 +690,15 @@ class AsyncCallbackManagerForChainRun(AsyncRunManager, ChainManagerMixin):
|
||||
"""Async callback manager for chain run."""
|
||||
|
||||
def get_child(self, tag: Optional[str] = None) -> AsyncCallbackManager:
|
||||
"""Get a child callback manager."""
|
||||
"""Get a child callback manager.
|
||||
|
||||
Args:
|
||||
tag (str, optional): The tag for the child callback manager.
|
||||
Defaults to None.
|
||||
|
||||
Returns:
|
||||
AsyncCallbackManager: The child callback manager.
|
||||
"""
|
||||
manager = AsyncCallbackManager(handlers=[], parent_run_id=self.run_id)
|
||||
manager.set_handlers(self.inheritable_handlers)
|
||||
manager.add_tags(self.inheritable_tags)
|
||||
@@ -527,7 +707,11 @@ class AsyncCallbackManagerForChainRun(AsyncRunManager, ChainManagerMixin):
|
||||
return manager
|
||||
|
||||
async def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
|
||||
"""Run when chain ends running."""
|
||||
"""Run when chain ends running.
|
||||
|
||||
Args:
|
||||
outputs (Dict[str, Any]): The outputs of the chain.
|
||||
"""
|
||||
await _ahandle_event(
|
||||
self.handlers,
|
||||
"on_chain_end",
|
||||
@@ -543,7 +727,11 @@ class AsyncCallbackManagerForChainRun(AsyncRunManager, ChainManagerMixin):
|
||||
error: Union[Exception, KeyboardInterrupt],
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Run when chain errors."""
|
||||
"""Run when chain errors.
|
||||
|
||||
Args:
|
||||
error (Exception or KeyboardInterrupt): The error.
|
||||
"""
|
||||
await _ahandle_event(
|
||||
self.handlers,
|
||||
"on_chain_error",
|
||||
@@ -555,7 +743,14 @@ class AsyncCallbackManagerForChainRun(AsyncRunManager, ChainManagerMixin):
|
||||
)
|
||||
|
||||
async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
|
||||
"""Run when agent action is received."""
|
||||
"""Run when agent action is received.
|
||||
|
||||
Args:
|
||||
action (AgentAction): The agent action.
|
||||
|
||||
Returns:
|
||||
Any: The result of the callback.
|
||||
"""
|
||||
await _ahandle_event(
|
||||
self.handlers,
|
||||
"on_agent_action",
|
||||
@@ -567,7 +762,14 @@ class AsyncCallbackManagerForChainRun(AsyncRunManager, ChainManagerMixin):
|
||||
)
|
||||
|
||||
async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
|
||||
"""Run when agent finish is received."""
|
||||
"""Run when agent finish is received.
|
||||
|
||||
Args:
|
||||
finish (AgentFinish): The agent finish.
|
||||
|
||||
Returns:
|
||||
Any: The result of the callback.
|
||||
"""
|
||||
await _ahandle_event(
|
||||
self.handlers,
|
||||
"on_agent_finish",
|
||||
@@ -583,7 +785,15 @@ class CallbackManagerForToolRun(RunManager, ToolManagerMixin):
|
||||
"""Callback manager for tool run."""
|
||||
|
||||
def get_child(self, tag: Optional[str] = None) -> CallbackManager:
|
||||
"""Get a child callback manager."""
|
||||
"""Get a child callback manager.
|
||||
|
||||
Args:
|
||||
tag (str, optional): The tag for the child callback manager.
|
||||
Defaults to None.
|
||||
|
||||
Returns:
|
||||
CallbackManager: The child callback manager.
|
||||
"""
|
||||
manager = CallbackManager(handlers=[], parent_run_id=self.run_id)
|
||||
manager.set_handlers(self.inheritable_handlers)
|
||||
manager.add_tags(self.inheritable_tags)
|
||||
@@ -596,7 +806,11 @@ class CallbackManagerForToolRun(RunManager, ToolManagerMixin):
|
||||
output: str,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Run when tool ends running."""
|
||||
"""Run when tool ends running.
|
||||
|
||||
Args:
|
||||
output (str): The output of the tool.
|
||||
"""
|
||||
_handle_event(
|
||||
self.handlers,
|
||||
"on_tool_end",
|
||||
@@ -612,7 +826,11 @@ class CallbackManagerForToolRun(RunManager, ToolManagerMixin):
|
||||
error: Union[Exception, KeyboardInterrupt],
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Run when tool errors."""
|
||||
"""Run when tool errors.
|
||||
|
||||
Args:
|
||||
error (Exception or KeyboardInterrupt): The error.
|
||||
"""
|
||||
_handle_event(
|
||||
self.handlers,
|
||||
"on_tool_error",
|
||||
@@ -628,7 +846,15 @@ class AsyncCallbackManagerForToolRun(AsyncRunManager, ToolManagerMixin):
|
||||
"""Async callback manager for tool run."""
|
||||
|
||||
def get_child(self, tag: Optional[str] = None) -> AsyncCallbackManager:
|
||||
"""Get a child callback manager."""
|
||||
"""Get a child callback manager.
|
||||
|
||||
Args:
|
||||
tag (str, optional): The tag to add to the child
|
||||
callback manager. Defaults to None.
|
||||
|
||||
Returns:
|
||||
AsyncCallbackManager: The child callback manager.
|
||||
"""
|
||||
manager = AsyncCallbackManager(handlers=[], parent_run_id=self.run_id)
|
||||
manager.set_handlers(self.inheritable_handlers)
|
||||
manager.add_tags(self.inheritable_tags)
|
||||
@@ -637,7 +863,11 @@ class AsyncCallbackManagerForToolRun(AsyncRunManager, ToolManagerMixin):
|
||||
return manager
|
||||
|
||||
async def on_tool_end(self, output: str, **kwargs: Any) -> None:
|
||||
"""Run when tool ends running."""
|
||||
"""Run when tool ends running.
|
||||
|
||||
Args:
|
||||
output (str): The output of the tool.
|
||||
"""
|
||||
await _ahandle_event(
|
||||
self.handlers,
|
||||
"on_tool_end",
|
||||
@@ -653,7 +883,11 @@ class AsyncCallbackManagerForToolRun(AsyncRunManager, ToolManagerMixin):
|
||||
error: Union[Exception, KeyboardInterrupt],
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Run when tool errors."""
|
||||
"""Run when tool errors.
|
||||
|
||||
Args:
|
||||
error (Exception or KeyboardInterrupt): The error.
|
||||
"""
|
||||
await _ahandle_event(
|
||||
self.handlers,
|
||||
"on_tool_error",
|
||||
@@ -672,66 +906,92 @@ class CallbackManager(BaseCallbackManager):
|
||||
self,
|
||||
serialized: Dict[str, Any],
|
||||
prompts: List[str],
|
||||
run_id: Optional[UUID] = None,
|
||||
**kwargs: Any,
|
||||
) -> CallbackManagerForLLMRun:
|
||||
"""Run when LLM starts running."""
|
||||
if run_id is None:
|
||||
run_id = uuid4()
|
||||
) -> List[CallbackManagerForLLMRun]:
|
||||
"""Run when LLM starts running.
|
||||
|
||||
_handle_event(
|
||||
self.handlers,
|
||||
"on_llm_start",
|
||||
"ignore_llm",
|
||||
serialized,
|
||||
prompts,
|
||||
run_id=run_id,
|
||||
parent_run_id=self.parent_run_id,
|
||||
tags=self.tags,
|
||||
**kwargs,
|
||||
)
|
||||
Args:
|
||||
serialized (Dict[str, Any]): The serialized LLM.
|
||||
prompts (List[str]): The list of prompts.
|
||||
run_id (UUID, optional): The ID of the run. Defaults to None.
|
||||
|
||||
return CallbackManagerForLLMRun(
|
||||
run_id=run_id,
|
||||
handlers=self.handlers,
|
||||
inheritable_handlers=self.inheritable_handlers,
|
||||
parent_run_id=self.parent_run_id,
|
||||
tags=self.tags,
|
||||
inheritable_tags=self.inheritable_tags,
|
||||
)
|
||||
Returns:
|
||||
List[CallbackManagerForLLMRun]: A callback manager for each
|
||||
prompt as an LLM run.
|
||||
"""
|
||||
managers = []
|
||||
for prompt in prompts:
|
||||
run_id_ = uuid4()
|
||||
_handle_event(
|
||||
self.handlers,
|
||||
"on_llm_start",
|
||||
"ignore_llm",
|
||||
serialized,
|
||||
[prompt],
|
||||
run_id=run_id_,
|
||||
parent_run_id=self.parent_run_id,
|
||||
tags=self.tags,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
managers.append(
|
||||
CallbackManagerForLLMRun(
|
||||
run_id=run_id_,
|
||||
handlers=self.handlers,
|
||||
inheritable_handlers=self.inheritable_handlers,
|
||||
parent_run_id=self.parent_run_id,
|
||||
tags=self.tags,
|
||||
inheritable_tags=self.inheritable_tags,
|
||||
)
|
||||
)
|
||||
|
||||
return managers
|
||||
|
||||
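A behavioural consequence of this change: `CallbackManager.on_llm_start` now hands back one `CallbackManagerForLLMRun` per prompt instead of a single manager. A small sketch of how calling code iterates over the result; the handler class and the serialized dict are illustrative, not part of this diff.

```python
from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.manager import CallbackManager


class PrintStartHandler(BaseCallbackHandler):
    """Illustrative handler that just reports when an LLM run starts."""

    def on_llm_start(self, serialized, prompts, **kwargs):
        print(f"LLM run started for prompts: {prompts}")


cm = CallbackManager(handlers=[PrintStartHandler()])
prompts = ["Tell me a joke", "Tell me a poem"]

# One run manager is returned per prompt; each has its own run_id.
run_managers = cm.on_llm_start({"name": "fake-llm"}, prompts)
for prompt, run_manager in zip(prompts, run_managers):
    # ... generate for `prompt`, then close out that run with
    # run_manager.on_llm_end(result)
    print(prompt, run_manager.run_id)
```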
def on_chat_model_start(
|
||||
self,
|
||||
serialized: Dict[str, Any],
|
||||
messages: List[List[BaseMessage]],
|
||||
run_id: Optional[UUID] = None,
|
||||
**kwargs: Any,
|
||||
) -> CallbackManagerForLLMRun:
|
||||
"""Run when LLM starts running."""
|
||||
if run_id is None:
|
||||
run_id = uuid4()
|
||||
_handle_event(
|
||||
self.handlers,
|
||||
"on_chat_model_start",
|
||||
"ignore_chat_model",
|
||||
serialized,
|
||||
messages,
|
||||
run_id=run_id,
|
||||
parent_run_id=self.parent_run_id,
|
||||
tags=self.tags,
|
||||
**kwargs,
|
||||
)
|
||||
) -> List[CallbackManagerForLLMRun]:
|
||||
"""Run when LLM starts running.
|
||||
|
||||
# Re-use the LLM Run Manager since the outputs are treated
|
||||
# the same for now
|
||||
return CallbackManagerForLLMRun(
|
||||
run_id=run_id,
|
||||
handlers=self.handlers,
|
||||
inheritable_handlers=self.inheritable_handlers,
|
||||
parent_run_id=self.parent_run_id,
|
||||
tags=self.tags,
|
||||
inheritable_tags=self.inheritable_tags,
|
||||
)
|
||||
Args:
|
||||
serialized (Dict[str, Any]): The serialized LLM.
|
||||
messages (List[List[BaseMessage]]): The list of messages.
|
||||
run_id (UUID, optional): The ID of the run. Defaults to None.
|
||||
|
||||
Returns:
|
||||
List[CallbackManagerForLLMRun]: A callback manager for each
|
||||
list of messages as an LLM run.
|
||||
"""
|
||||
|
||||
managers = []
|
||||
for message_list in messages:
|
||||
run_id_ = uuid4()
|
||||
_handle_event(
|
||||
self.handlers,
|
||||
"on_chat_model_start",
|
||||
"ignore_chat_model",
|
||||
serialized,
|
||||
[message_list],
|
||||
run_id=run_id_,
|
||||
parent_run_id=self.parent_run_id,
|
||||
tags=self.tags,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
managers.append(
|
||||
CallbackManagerForLLMRun(
|
||||
run_id=run_id_,
|
||||
handlers=self.handlers,
|
||||
inheritable_handlers=self.inheritable_handlers,
|
||||
parent_run_id=self.parent_run_id,
|
||||
tags=self.tags,
|
||||
inheritable_tags=self.inheritable_tags,
|
||||
)
|
||||
)
|
||||
|
||||
return managers
|
||||
|
||||
def on_chain_start(
|
||||
self,
|
||||
@@ -740,7 +1000,16 @@ class CallbackManager(BaseCallbackManager):
|
||||
run_id: Optional[UUID] = None,
|
||||
**kwargs: Any,
|
||||
) -> CallbackManagerForChainRun:
|
||||
"""Run when chain starts running."""
|
||||
"""Run when chain starts running.
|
||||
|
||||
Args:
|
||||
serialized (Dict[str, Any]): The serialized chain.
|
||||
inputs (Dict[str, Any]): The inputs to the chain.
|
||||
run_id (UUID, optional): The ID of the run. Defaults to None.
|
||||
|
||||
Returns:
|
||||
CallbackManagerForChainRun: The callback manager for the chain run.
|
||||
"""
|
||||
if run_id is None:
|
||||
run_id = uuid4()
|
||||
|
||||
@@ -773,7 +1042,17 @@ class CallbackManager(BaseCallbackManager):
|
||||
parent_run_id: Optional[UUID] = None,
|
||||
**kwargs: Any,
|
||||
) -> CallbackManagerForToolRun:
|
||||
"""Run when tool starts running."""
|
||||
"""Run when tool starts running.
|
||||
|
||||
Args:
|
||||
serialized (Dict[str, Any]): The serialized tool.
|
||||
input_str (str): The input to the tool.
|
||||
run_id (UUID, optional): The ID of the run. Defaults to None.
|
||||
parent_run_id (UUID, optional): The ID of the parent run. Defaults to None.
|
||||
|
||||
Returns:
|
||||
CallbackManagerForToolRun: The callback manager for the tool run.
|
||||
"""
|
||||
if run_id is None:
|
||||
run_id = uuid4()
|
||||
|
||||
@@ -807,7 +1086,22 @@ class CallbackManager(BaseCallbackManager):
|
||||
inheritable_tags: Optional[List[str]] = None,
|
||||
local_tags: Optional[List[str]] = None,
|
||||
) -> CallbackManager:
|
||||
"""Configure the callback manager."""
|
||||
"""Configure the callback manager.
|
||||
|
||||
Args:
|
||||
inheritable_callbacks (Optional[Callbacks], optional): The inheritable
|
||||
callbacks. Defaults to None.
|
||||
local_callbacks (Optional[Callbacks], optional): The local callbacks.
|
||||
Defaults to None.
|
||||
verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
|
||||
inheritable_tags (Optional[List[str]], optional): The inheritable tags.
|
||||
Defaults to None.
|
||||
local_tags (Optional[List[str]], optional): The local tags.
|
||||
Defaults to None.
|
||||
|
||||
Returns:
|
||||
CallbackManager: The configured callback manager.
|
||||
"""
|
||||
return _configure(
|
||||
cls,
|
||||
inheritable_callbacks,
|
||||
@@ -830,64 +1124,107 @@ class AsyncCallbackManager(BaseCallbackManager):
|
||||
self,
|
||||
serialized: Dict[str, Any],
|
||||
prompts: List[str],
|
||||
run_id: Optional[UUID] = None,
|
||||
**kwargs: Any,
|
||||
) -> AsyncCallbackManagerForLLMRun:
|
||||
"""Run when LLM starts running."""
|
||||
if run_id is None:
|
||||
run_id = uuid4()
|
||||
) -> List[AsyncCallbackManagerForLLMRun]:
|
||||
"""Run when LLM starts running.
|
||||
|
||||
await _ahandle_event(
|
||||
self.handlers,
|
||||
"on_llm_start",
|
||||
"ignore_llm",
|
||||
serialized,
|
||||
prompts,
|
||||
run_id=run_id,
|
||||
parent_run_id=self.parent_run_id,
|
||||
tags=self.tags,
|
||||
**kwargs,
|
||||
)
|
||||
Args:
|
||||
serialized (Dict[str, Any]): The serialized LLM.
|
||||
prompts (List[str]): The list of prompts.
|
||||
run_id (UUID, optional): The ID of the run. Defaults to None.
|
||||
|
||||
return AsyncCallbackManagerForLLMRun(
|
||||
run_id=run_id,
|
||||
handlers=self.handlers,
|
||||
inheritable_handlers=self.inheritable_handlers,
|
||||
parent_run_id=self.parent_run_id,
|
||||
tags=self.tags,
|
||||
inheritable_tags=self.inheritable_tags,
|
||||
)
|
||||
Returns:
|
||||
List[AsyncCallbackManagerForLLMRun]: The list of async
|
||||
callback managers, one for each LLM Run corresponding
|
||||
to each prompt.
|
||||
"""
|
||||
|
||||
tasks = []
|
||||
managers = []
|
||||
|
||||
for prompt in prompts:
|
||||
run_id_ = uuid4()
|
||||
|
||||
tasks.append(
|
||||
_ahandle_event(
|
||||
self.handlers,
|
||||
"on_llm_start",
|
||||
"ignore_llm",
|
||||
serialized,
|
||||
[prompt],
|
||||
run_id=run_id_,
|
||||
parent_run_id=self.parent_run_id,
|
||||
tags=self.tags,
|
||||
**kwargs,
|
||||
)
|
||||
)
|
||||
|
||||
managers.append(
|
||||
AsyncCallbackManagerForLLMRun(
|
||||
run_id=run_id_,
|
||||
handlers=self.handlers,
|
||||
inheritable_handlers=self.inheritable_handlers,
|
||||
parent_run_id=self.parent_run_id,
|
||||
tags=self.tags,
|
||||
inheritable_tags=self.inheritable_tags,
|
||||
)
|
||||
)
|
||||
|
||||
await asyncio.gather(*tasks)
|
||||
|
||||
return managers
|
||||
|
||||
async def on_chat_model_start(
|
||||
self,
|
||||
serialized: Dict[str, Any],
|
||||
messages: List[List[BaseMessage]],
|
||||
run_id: Optional[UUID] = None,
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
if run_id is None:
|
||||
run_id = uuid4()
|
||||
"""Run when LLM starts running.
|
||||
|
||||
await _ahandle_event(
|
||||
self.handlers,
|
||||
"on_chat_model_start",
|
||||
"ignore_chat_model",
|
||||
serialized,
|
||||
messages,
|
||||
run_id=run_id,
|
||||
parent_run_id=self.parent_run_id,
|
||||
tags=self.tags,
|
||||
**kwargs,
|
||||
)
|
||||
Args:
|
||||
serialized (Dict[str, Any]): The serialized LLM.
|
||||
messages (List[List[BaseMessage]]): The list of messages.
|
||||
run_id (UUID, optional): The ID of the run. Defaults to None.
|
||||
|
||||
return AsyncCallbackManagerForLLMRun(
|
||||
run_id=run_id,
|
||||
handlers=self.handlers,
|
||||
inheritable_handlers=self.inheritable_handlers,
|
||||
parent_run_id=self.parent_run_id,
|
||||
tags=self.tags,
|
||||
inheritable_tags=self.inheritable_tags,
|
||||
)
|
||||
Returns:
|
||||
List[AsyncCallbackManagerForLLMRun]: The list of
|
||||
async callback managers, one for each LLM Run
|
||||
corresponding to each inner message list.
|
||||
"""
|
||||
tasks = []
|
||||
managers = []
|
||||
|
||||
for message_list in messages:
|
||||
run_id_ = uuid4()
|
||||
|
||||
tasks.append(
|
||||
_ahandle_event(
|
||||
self.handlers,
|
||||
"on_chat_model_start",
|
||||
"ignore_chat_model",
|
||||
serialized,
|
||||
[message_list],
|
||||
run_id=run_id_,
|
||||
parent_run_id=self.parent_run_id,
|
||||
tags=self.tags,
|
||||
**kwargs,
|
||||
)
|
||||
)
|
||||
|
||||
managers.append(
|
||||
AsyncCallbackManagerForLLMRun(
|
||||
run_id=run_id_,
|
||||
handlers=self.handlers,
|
||||
inheritable_handlers=self.inheritable_handlers,
|
||||
parent_run_id=self.parent_run_id,
|
||||
tags=self.tags,
|
||||
inheritable_tags=self.inheritable_tags,
|
||||
)
|
||||
)
|
||||
|
||||
await asyncio.gather(*tasks)
|
||||
return managers
|
||||
|
||||
async def on_chain_start(
|
||||
self,
|
||||
@@ -896,7 +1233,17 @@ class AsyncCallbackManager(BaseCallbackManager):
|
||||
run_id: Optional[UUID] = None,
|
||||
**kwargs: Any,
|
||||
) -> AsyncCallbackManagerForChainRun:
|
||||
"""Run when chain starts running."""
|
||||
"""Run when chain starts running.
|
||||
|
||||
Args:
|
||||
serialized (Dict[str, Any]): The serialized chain.
|
||||
inputs (Dict[str, Any]): The inputs to the chain.
|
||||
run_id (UUID, optional): The ID of the run. Defaults to None.
|
||||
|
||||
Returns:
|
||||
AsyncCallbackManagerForChainRun: The async callback manager
|
||||
for the chain run.
|
||||
"""
|
||||
if run_id is None:
|
||||
run_id = uuid4()
|
||||
|
||||
@@ -929,7 +1276,19 @@ class AsyncCallbackManager(BaseCallbackManager):
|
||||
parent_run_id: Optional[UUID] = None,
|
||||
**kwargs: Any,
|
||||
) -> AsyncCallbackManagerForToolRun:
|
||||
"""Run when tool starts running."""
|
||||
"""Run when tool starts running.
|
||||
|
||||
Args:
|
||||
serialized (Dict[str, Any]): The serialized tool.
|
||||
input_str (str): The input to the tool.
|
||||
run_id (UUID, optional): The ID of the run. Defaults to None.
|
||||
parent_run_id (UUID, optional): The ID of the parent run.
|
||||
Defaults to None.
|
||||
|
||||
Returns:
|
||||
AsyncCallbackManagerForToolRun: The async callback manager
|
||||
for the tool run.
|
||||
"""
|
||||
if run_id is None:
|
||||
run_id = uuid4()
|
||||
|
||||
@@ -963,7 +1322,22 @@ class AsyncCallbackManager(BaseCallbackManager):
|
||||
inheritable_tags: Optional[List[str]] = None,
|
||||
local_tags: Optional[List[str]] = None,
|
||||
) -> AsyncCallbackManager:
|
||||
"""Configure the callback manager."""
|
||||
"""Configure the async callback manager.
|
||||
|
||||
Args:
|
||||
inheritable_callbacks (Optional[Callbacks], optional): The inheritable
|
||||
callbacks. Defaults to None.
|
||||
local_callbacks (Optional[Callbacks], optional): The local callbacks.
|
||||
Defaults to None.
|
||||
verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
|
||||
inheritable_tags (Optional[List[str]], optional): The inheritable tags.
|
||||
Defaults to None.
|
||||
local_tags (Optional[List[str]], optional): The local tags.
|
||||
Defaults to None.
|
||||
|
||||
Returns:
|
||||
AsyncCallbackManager: The configured async callback manager.
|
||||
"""
|
||||
return _configure(
|
||||
cls,
|
||||
inheritable_callbacks,
|
||||
@@ -978,7 +1352,14 @@ T = TypeVar("T", CallbackManager, AsyncCallbackManager)
|
||||
|
||||
|
||||
def env_var_is_set(env_var: str) -> bool:
|
||||
"""Check if an environment variable is set."""
|
||||
"""Check if an environment variable is set.
|
||||
|
||||
Args:
|
||||
env_var (str): The name of the environment variable.
|
||||
|
||||
Returns:
|
||||
bool: True if the environment variable is set, False otherwise.
|
||||
"""
|
||||
return env_var in os.environ and os.environ[env_var] not in (
|
||||
"",
|
||||
"0",
|
||||
@@ -995,7 +1376,22 @@ def _configure(
|
||||
inheritable_tags: Optional[List[str]] = None,
|
||||
local_tags: Optional[List[str]] = None,
|
||||
) -> T:
|
||||
"""Configure the callback manager."""
|
||||
"""Configure the callback manager.
|
||||
|
||||
Args:
|
||||
callback_manager_cls (Type[T]): The callback manager class.
|
||||
inheritable_callbacks (Optional[Callbacks], optional): The inheritable
|
||||
callbacks. Defaults to None.
|
||||
local_callbacks (Optional[Callbacks], optional): The local callbacks.
|
||||
Defaults to None.
|
||||
verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
|
||||
inheritable_tags (Optional[List[str]], optional): The inheritable tags.
|
||||
Defaults to None.
|
||||
local_tags (Optional[List[str]], optional): The local tags. Defaults to None.
|
||||
|
||||
Returns:
|
||||
T: The configured callback manager.
|
||||
"""
|
||||
callback_manager = callback_manager_cls(handlers=[])
|
||||
if inheritable_callbacks or local_callbacks:
|
||||
if isinstance(inheritable_callbacks, list) or inheritable_callbacks is None:
|
||||
|
||||
@@ -20,6 +20,7 @@ from langchain.utils import get_from_dict_or_env
|
||||
|
||||
|
||||
def import_mlflow() -> Any:
|
||||
"""Import the mlflow python package and raise an error if it is not installed."""
|
||||
try:
|
||||
import mlflow
|
||||
except ImportError:
|
||||
@@ -117,7 +118,7 @@ class MlflowLogger:
|
||||
Parameters:
|
||||
name (str): Name of the run.
|
||||
experiment (str): Name of the experiment.
|
||||
tags (str): Tags to be attached for the run.
|
||||
tags (dict): Tags to be attached for the run.
|
||||
tracking_uri (str): MLflow tracking server uri.
|
||||
|
||||
This handler implements the helper functions to initialize,
|
||||
@@ -222,7 +223,7 @@ class MlflowCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
|
||||
Parameters:
|
||||
name (str): Name of the run.
|
||||
experiment (str): Name of the experiment.
|
||||
tags (str): Tags to be attached for the run.
|
||||
tags (dict): Tags to be attached for the run.
|
||||
tracking_uri (str): MLflow tracking server uri.
|
||||
|
||||
This handler will utilize the associated callback method called and formats
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
"""Callback Handler that prints to std out."""
|
||||
from typing import Any, Dict, List, Optional, Union
|
||||
from typing import Any, Dict, List
|
||||
|
||||
from langchain.callbacks.base import BaseCallbackHandler
|
||||
from langchain.schema import AgentAction, AgentFinish, LLMResult
|
||||
from langchain.schema import LLMResult
|
||||
|
||||
MODEL_COST_PER_1K_TOKENS = {
|
||||
# GPT-4 input
|
||||
@@ -32,6 +32,7 @@ MODEL_COST_PER_1K_TOKENS = {
|
||||
"gpt-3.5-turbo-16k-completion": 0.004,
|
||||
"gpt-3.5-turbo-16k-0613-completion": 0.004,
|
||||
# Others
|
||||
"gpt-35-turbo": 0.002, # Azure OpenAI version of ChatGPT
|
||||
"text-ada-001": 0.0004,
|
||||
"ada": 0.0004,
|
||||
"text-babbage-001": 0.0005,
|
||||
@@ -52,6 +53,17 @@ def standardize_model_name(
|
||||
model_name: str,
|
||||
is_completion: bool = False,
|
||||
) -> str:
|
||||
"""
|
||||
Standardize the model name to a format that can be used in the OpenAI API.
|
||||
Args:
|
||||
model_name: Model name to standardize.
|
||||
is_completion: Whether the model is used for completion or not.
|
||||
Defaults to False.
|
||||
|
||||
Returns:
|
||||
Standardized model name.
|
||||
|
||||
"""
|
||||
model_name = model_name.lower()
|
||||
if "ft-" in model_name:
|
||||
return model_name.split(":")[0] + "-finetuned"
|
||||
@@ -66,13 +78,25 @@ def standardize_model_name(
|
||||
def get_openai_token_cost_for_model(
|
||||
model_name: str, num_tokens: int, is_completion: bool = False
|
||||
) -> float:
|
||||
"""
|
||||
Get the cost in USD for a given model and number of tokens.
|
||||
|
||||
Args:
|
||||
model_name: Name of the model
|
||||
num_tokens: Number of tokens.
|
||||
is_completion: Whether the model is used for completion or not.
|
||||
Defaults to False.
|
||||
|
||||
Returns:
|
||||
Cost in USD.
|
||||
"""
|
||||
model_name = standardize_model_name(model_name, is_completion=is_completion)
|
||||
if model_name not in MODEL_COST_PER_1K_TOKENS:
|
||||
raise ValueError(
|
||||
f"Unknown model: {model_name}. Please provide a valid OpenAI model name."
|
||||
"Known models are: " + ", ".join(MODEL_COST_PER_1K_TOKENS.keys())
|
||||
)
|
||||
return MODEL_COST_PER_1K_TOKENS[model_name] * num_tokens / 1000
|
||||
return MODEL_COST_PER_1K_TOKENS[model_name] * (num_tokens / 1000)
|
||||
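Since the per-token arithmetic above is easy to get wrong, here is a small worked check using the `text-ada-001` rate from the cost table ($0.0004 per 1K tokens). It assumes these helpers live in `langchain.callbacks.openai_info`, alongside `OpenAICallbackHandler`.

```python
from langchain.callbacks.openai_info import get_openai_token_cost_for_model

# 2,000 prompt tokens at $0.0004 per 1K tokens -> $0.0008
cost = get_openai_token_cost_for_model("text-ada-001", 2000)
assert abs(cost - 0.0008) < 1e-9
print(f"${cost:.4f}")
```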
|
||||
|
||||
class OpenAICallbackHandler(BaseCallbackHandler):
|
||||
@@ -129,64 +153,6 @@ class OpenAICallbackHandler(BaseCallbackHandler):
|
||||
self.prompt_tokens += prompt_tokens
|
||||
self.completion_tokens += completion_tokens
|
||||
|
||||
def on_llm_error(
|
||||
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
|
||||
) -> None:
|
||||
"""Do nothing."""
|
||||
pass
|
||||
|
||||
def on_chain_start(
|
||||
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
|
||||
) -> None:
|
||||
"""Print out that we are entering a chain."""
|
||||
pass
|
||||
|
||||
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
|
||||
"""Print out that we finished a chain."""
|
||||
pass
|
||||
|
||||
def on_chain_error(
|
||||
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
|
||||
) -> None:
|
||||
"""Do nothing."""
|
||||
pass
|
||||
|
||||
def on_tool_start(
|
||||
self,
|
||||
serialized: Dict[str, Any],
|
||||
input_str: str,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Print out the log in specified color."""
|
||||
pass
|
||||
|
||||
def on_tool_end(
|
||||
self,
|
||||
output: str,
|
||||
color: Optional[str] = None,
|
||||
observation_prefix: Optional[str] = None,
|
||||
llm_prefix: Optional[str] = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""If not the final action, print out observation."""
|
||||
pass
|
||||
|
||||
def on_tool_error(
|
||||
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
|
||||
) -> None:
|
||||
"""Do nothing."""
|
||||
pass
|
||||
|
||||
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
|
||||
"""Run on agent action."""
|
||||
pass
|
||||
|
||||
def on_agent_finish(
|
||||
self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
|
||||
) -> None:
|
||||
"""Run on agent end."""
|
||||
pass
|
||||
|
||||
def __copy__(self) -> "OpenAICallbackHandler":
|
||||
"""Return a copy of the callback handler."""
|
||||
return self
|
||||
|
||||
langchain/callbacks/streaming_aiter_final_only.py (new file, 88 lines)

@@ -0,0 +1,88 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
|
||||
from langchain.schema import LLMResult
|
||||
|
||||
DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"]
|
||||
|
||||
|
||||
class AsyncFinalIteratorCallbackHandler(AsyncIteratorCallbackHandler):
|
||||
"""Callback handler that returns an async iterator.
|
||||
Only the final output of the agent will be iterated.
|
||||
"""
|
||||
|
||||
def append_to_last_tokens(self, token: str) -> None:
|
||||
self.last_tokens.append(token)
|
||||
self.last_tokens_stripped.append(token.strip())
|
||||
if len(self.last_tokens) > len(self.answer_prefix_tokens):
|
||||
self.last_tokens.pop(0)
|
||||
self.last_tokens_stripped.pop(0)
|
||||
|
||||
def check_if_answer_reached(self) -> bool:
|
||||
if self.strip_tokens:
|
||||
return self.last_tokens_stripped == self.answer_prefix_tokens_stripped
|
||||
else:
|
||||
return self.last_tokens == self.answer_prefix_tokens
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
answer_prefix_tokens: Optional[List[str]] = None,
|
||||
strip_tokens: bool = True,
|
||||
stream_prefix: bool = False,
|
||||
) -> None:
|
||||
"""Instantiate AsyncFinalIteratorCallbackHandler.
|
||||
|
||||
Args:
|
||||
answer_prefix_tokens: Token sequence that prefixes the answer.
|
||||
Default is ["Final", "Answer", ":"]
|
||||
strip_tokens: Ignore white spaces and new lines when comparing
|
||||
answer_prefix_tokens to last tokens? (to determine if answer has been
|
||||
reached)
|
||||
stream_prefix: Should answer prefix itself also be streamed?
|
||||
"""
|
||||
super().__init__()
|
||||
if answer_prefix_tokens is None:
|
||||
self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS
|
||||
else:
|
||||
self.answer_prefix_tokens = answer_prefix_tokens
|
||||
if strip_tokens:
|
||||
self.answer_prefix_tokens_stripped = [
|
||||
token.strip() for token in self.answer_prefix_tokens
|
||||
]
|
||||
else:
|
||||
self.answer_prefix_tokens_stripped = self.answer_prefix_tokens
|
||||
self.last_tokens = [""] * len(self.answer_prefix_tokens)
|
||||
self.last_tokens_stripped = [""] * len(self.answer_prefix_tokens)
|
||||
self.strip_tokens = strip_tokens
|
||||
self.stream_prefix = stream_prefix
|
||||
self.answer_reached = False
|
||||
|
||||
async def on_llm_start(
|
||||
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
|
||||
) -> None:
|
||||
# If two calls are made in a row, this resets the state
|
||||
self.done.clear()
|
||||
self.answer_reached = False
|
||||
|
||||
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
|
||||
if self.answer_reached:
|
||||
self.done.set()
|
||||
|
||||
async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
|
||||
# Remember the last n tokens, where n = len(answer_prefix_tokens)
|
||||
self.append_to_last_tokens(token)
|
||||
|
||||
# Check if the last n tokens match the answer_prefix_tokens list ...
|
||||
if self.check_if_answer_reached():
|
||||
self.answer_reached = True
|
||||
if self.stream_prefix:
|
||||
for t in self.last_tokens:
|
||||
self.queue.put_nowait(t)
|
||||
return
|
||||
|
||||
# If yes, then put tokens from now on
|
||||
if self.answer_reached:
|
||||
self.queue.put_nowait(token)
|
||||
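A rough sketch of the new handler's behaviour. In a real application you would pass it via `callbacks=[...]` to a streaming agent and read tokens with the inherited `aiter()`; the loop below fakes the token stream so the filtering is visible without an API call. The `queue` attribute is inherited from `AsyncIteratorCallbackHandler`, as used in the code above.

```python
import asyncio

from langchain.callbacks.streaming_aiter_final_only import (
    AsyncFinalIteratorCallbackHandler,
)


async def main() -> None:
    handler = AsyncFinalIteratorCallbackHandler()  # prefix defaults to ["Final", "Answer", ":"]

    # Simulate what a streaming agent run would feed the handler: tokens before
    # the "Final Answer:" prefix are suppressed, tokens after it are queued.
    await handler.on_llm_start({}, ["fake prompt"])
    for token in ["I", " should", " answer", "Final", "Answer", ":", " 42", "!"]:
        await handler.on_llm_new_token(token)

    streamed = []
    while not handler.queue.empty():
        streamed.append(handler.queue.get_nowait())
    print("".join(streamed))  # -> " 42!"


asyncio.run(main())
```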
@@ -1,6 +1,7 @@
|
||||
"""Base interfaces for tracing runs."""
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from abc import ABC, abstractmethod
|
||||
from datetime import datetime
|
||||
from typing import Any, Dict, List, Optional, Union
|
||||
@@ -10,6 +11,8 @@ from langchain.callbacks.base import BaseCallbackHandler
|
||||
from langchain.callbacks.tracers.schemas import Run, RunTypeEnum
|
||||
from langchain.schema import LLMResult
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class TracerException(Exception):
|
||||
"""Base class for exceptions in tracers module."""
|
||||
@@ -41,9 +44,7 @@ class BaseTracer(BaseCallbackHandler, ABC):
|
||||
if parent_run:
|
||||
self._add_child_run(parent_run, run)
|
||||
else:
|
||||
raise TracerException(
|
||||
f"Parent run with UUID {run.parent_run_id} not found."
|
||||
)
|
||||
logger.warning(f"Parent run with UUID {run.parent_run_id} not found.")
|
||||
self.run_map[str(run.id)] = run
|
||||
|
||||
def _end_trace(self, run: Run) -> None:
|
||||
@@ -53,10 +54,8 @@ class BaseTracer(BaseCallbackHandler, ABC):
|
||||
else:
|
||||
parent_run = self.run_map.get(str(run.parent_run_id))
|
||||
if parent_run is None:
|
||||
raise TracerException(
|
||||
f"Parent run with UUID {run.parent_run_id} not found."
|
||||
)
|
||||
if (
|
||||
logger.warning(f"Parent run with UUID {run.parent_run_id} not found.")
|
||||
elif (
|
||||
run.child_execution_order is not None
|
||||
and parent_run.child_execution_order is not None
|
||||
and run.child_execution_order > parent_run.child_execution_order
|
||||
@@ -71,7 +70,8 @@ class BaseTracer(BaseCallbackHandler, ABC):
|
||||
|
||||
parent_run = self.run_map.get(parent_run_id)
|
||||
if parent_run is None:
|
||||
raise TracerException(f"Parent run with UUID {parent_run_id} not found.")
|
||||
logger.warning(f"Parent run with UUID {parent_run_id} not found.")
|
||||
return 1
|
||||
if parent_run.child_execution_order is None:
|
||||
raise TracerException(
|
||||
f"Parent run with UUID {parent_run_id} has no child execution order."
|
||||
|
||||
langchain/callbacks/tracers/evaluation.py (new file, 84 lines)
@@ -0,0 +1,84 @@
|
||||
"""A tracer that runs evaluators over completed runs."""
|
||||
from concurrent.futures import Future, ThreadPoolExecutor, wait
|
||||
from typing import Any, Optional, Sequence, Set, Union
|
||||
from uuid import UUID
|
||||
|
||||
from langchainplus_sdk import LangChainPlusClient, RunEvaluator
|
||||
|
||||
from langchain.callbacks.tracers.base import BaseTracer
|
||||
from langchain.callbacks.tracers.schemas import Run
|
||||
|
||||
|
||||
class EvaluatorCallbackHandler(BaseTracer):
|
||||
"""A tracer that runs a run evaluator whenever a run is persisted.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
evaluators : Sequence[RunEvaluator]
|
||||
The run evaluators to apply to all top level runs.
|
||||
max_workers : int, optional
|
||||
The maximum number of worker threads to use for running the evaluators.
|
||||
If not specified, it will default to the number of evaluators.
|
||||
client : LangChainPlusClient, optional
|
||||
The LangChainPlusClient instance to use for evaluating the runs.
|
||||
If not specified, a new instance will be created.
|
||||
example_id : Union[UUID, str], optional
|
||||
The example ID to be associated with the runs.
|
||||
|
||||
Attributes
|
||||
----------
|
||||
example_id : Union[UUID, None]
|
||||
The example ID associated with the runs.
|
||||
client : LangChainPlusClient
|
||||
The LangChainPlusClient instance used for evaluating the runs.
|
||||
evaluators : Sequence[RunEvaluator]
|
||||
The sequence of run evaluators to be executed.
|
||||
executor : ThreadPoolExecutor
|
||||
The thread pool executor used for running the evaluators.
|
||||
futures : Set[Future]
|
||||
The set of futures representing the running evaluators.
|
||||
"""
|
||||
|
||||
name = "evaluator_callback_handler"
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
evaluators: Sequence[RunEvaluator],
|
||||
max_workers: Optional[int] = None,
|
||||
client: Optional[LangChainPlusClient] = None,
|
||||
example_id: Optional[Union[UUID, str]] = None,
|
||||
**kwargs: Any
|
||||
) -> None:
|
||||
super().__init__(**kwargs)
|
||||
self.example_id = (
|
||||
UUID(example_id) if isinstance(example_id, str) else example_id
|
||||
)
|
||||
self.client = client or LangChainPlusClient()
|
||||
self.evaluators = evaluators
|
||||
self.executor = ThreadPoolExecutor(
|
||||
max_workers=max(max_workers or len(evaluators), 1)
|
||||
)
|
||||
self.futures: Set[Future] = set()
|
||||
|
||||
def _persist_run(self, run: Run) -> None:
|
||||
"""Run the evaluator on the run.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
run : Run
|
||||
The run to be evaluated.
|
||||
|
||||
"""
|
||||
run_ = run.copy()
|
||||
run_.reference_example_id = self.example_id
|
||||
for evaluator in self.evaluators:
|
||||
self.futures.add(
|
||||
self.executor.submit(self.client.evaluate_run, run_, evaluator)
|
||||
)
|
||||
|
||||
def wait_for_futures(self) -> None:
|
||||
"""Wait for all futures to complete."""
|
||||
futures = list(self.futures)
|
||||
wait(futures)
|
||||
for future in futures:
|
||||
self.futures.remove(future)
|
||||
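A hedged sketch of wiring the new tracer up: run an LLM call with the handler attached so every persisted top-level run is scored by the given evaluators, then block until the evaluations finish. The evaluator itself is a placeholder; how a `RunEvaluator` is constructed depends on the langchainplus SDK and is not shown in this diff.

```python
from langchainplus_sdk import RunEvaluator

from langchain.callbacks.tracers.evaluation import EvaluatorCallbackHandler
from langchain.llms import OpenAI

# Placeholder: assume an already-constructed RunEvaluator instance.
my_evaluator: RunEvaluator = ...

handler = EvaluatorCallbackHandler(evaluators=[my_evaluator])

llm = OpenAI(temperature=0)
llm("What is the capital of France?", callbacks=[handler])

# Evaluations are submitted to a thread pool, one future per evaluator;
# wait for them before exiting.
handler.wait_for_futures()
```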
@@ -1,20 +1,52 @@
|
||||
"""A tracer that collects all nested runs in a list."""
|
||||
from typing import Any, List
|
||||
|
||||
from typing import Any, List, Optional, Union
|
||||
from uuid import UUID
|
||||
|
||||
from langchain.callbacks.tracers.base import BaseTracer
|
||||
from langchain.callbacks.tracers.schemas import Run
|
||||
|
||||
|
||||
class RunCollectorCallbackHandler(BaseTracer):
|
||||
"""A tracer that collects all nested runs in a list.
|
||||
"""
|
||||
A tracer that collects all nested runs in a list.
|
||||
|
||||
Useful for inspection and for evaluation."""
|
||||
This tracer is useful for inspection and evaluation purposes.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
example_id : Optional[Union[UUID, str]], default=None
|
||||
The ID of the example being traced. It can be either a UUID or a string.
|
||||
"""
|
||||
|
||||
name = "run-collector_callback_handler"
|
||||
|
||||
def __init__(self, **kwargs: Any) -> None:
|
||||
def __init__(
|
||||
self, example_id: Optional[Union[UUID, str]] = None, **kwargs: Any
|
||||
) -> None:
|
||||
"""
|
||||
Initialize the RunCollectorCallbackHandler.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
example_id : Optional[Union[UUID, str]], default=None
|
||||
The ID of the example being traced. It can be either a UUID or a string.
|
||||
"""
|
||||
super().__init__(**kwargs)
|
||||
self.example_id = (
|
||||
UUID(example_id) if isinstance(example_id, str) else example_id
|
||||
)
|
||||
self.traced_runs: List[Run] = []
|
||||
|
||||
def _persist_run(self, run: Run) -> None:
|
||||
self.traced_runs.append(run)
|
||||
"""
|
||||
Persist a run by adding it to the traced_runs list.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
run : Run
|
||||
The run to be persisted.
|
||||
"""
|
||||
run_ = run.copy()
|
||||
run_.reference_example_id = self.example_id
|
||||
self.traced_runs.append(run_)
|
||||
|
||||
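The run collector is the simplest of these tracers to use. A minimal sketch, assuming the collector lives in `langchain.callbacks.tracers.run_collector` and that the LLM call is just an example workload:

```python
from langchain.callbacks.tracers.run_collector import RunCollectorCallbackHandler
from langchain.llms import OpenAI

collector = RunCollectorCallbackHandler()

llm = OpenAI(temperature=0)
llm("Say hello", callbacks=[collector])

# Every persisted top-level run (with its nested children) is kept in memory.
for run in collector.traced_runs:
    print(run.id, run.run_type)
```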
@@ -7,6 +7,16 @@ from langchain.input import get_bolded_text, get_colored_text
|
||||
|
||||
|
||||
def try_json_stringify(obj: Any, fallback: str) -> str:
|
||||
"""
|
||||
Try to stringify an object to JSON.
|
||||
Args:
|
||||
obj: Object to stringify.
|
||||
fallback: Fallback string to return if the object cannot be stringified.
|
||||
|
||||
Returns:
|
||||
A JSON string if the object can be stringified, otherwise the fallback string.
|
||||
|
||||
"""
|
||||
try:
|
||||
return json.dumps(obj, indent=2, ensure_ascii=False)
|
||||
except Exception:
|
||||
@@ -14,6 +24,16 @@ def try_json_stringify(obj: Any, fallback: str) -> str:
|
||||
|
||||
|
||||
def elapsed(run: Any) -> str:
|
||||
"""Get the elapsed time of a run.
|
||||
|
||||
Args:
|
||||
run: any object with a start_time and end_time attribute.
|
||||
|
||||
Returns:
|
||||
A string with the elapsed time in seconds or
|
||||
milliseconds if time is less than a second.
|
||||
|
||||
"""
|
||||
elapsed_time = run.end_time - run.start_time
|
||||
milliseconds = elapsed_time.total_seconds() * 1000
|
||||
if milliseconds < 1000:
|
||||
@@ -22,6 +42,8 @@ def elapsed(run: Any) -> str:
|
||||
|
||||
|
||||
class ConsoleCallbackHandler(BaseTracer):
|
||||
"""Tracer that prints to the console."""
|
||||
|
||||
name = "console_callback_handler"
|
||||
|
||||
def _persist_run(self, run: Run) -> None:
|
||||
|
||||
@@ -137,6 +137,8 @@ def _replace_type_with_kind(data: Any) -> Any:
|
||||
|
||||
|
||||
class WandbRunArgs(TypedDict):
|
||||
"""Arguments for the WandbTracer."""
|
||||
|
||||
job_type: Optional[str]
|
||||
dir: Optional[StrPath]
|
||||
config: Union[Dict, str, None]
|
||||
|
||||
@@ -4,6 +4,7 @@ from typing import Any, Dict, Iterable, Tuple, Union
|
||||
|
||||
|
||||
def import_spacy() -> Any:
|
||||
"""Import the spacy python package and raise an error if it is not installed."""
|
||||
try:
|
||||
import spacy
|
||||
except ImportError:
|
||||
@@ -15,6 +16,7 @@ def import_spacy() -> Any:
|
||||
|
||||
|
||||
def import_pandas() -> Any:
|
||||
"""Import the pandas python package and raise an error if it is not installed."""
|
||||
try:
|
||||
import pandas
|
||||
except ImportError:
|
||||
@@ -26,6 +28,7 @@ def import_pandas() -> Any:
|
||||
|
||||
|
||||
def import_textstat() -> Any:
|
||||
"""Import the textstat python package and raise an error if it is not installed."""
|
||||
try:
|
||||
import textstat
|
||||
except ImportError:
|
||||
|
||||
@@ -17,6 +17,7 @@ from langchain.schema import AgentAction, AgentFinish, LLMResult
|
||||
|
||||
|
||||
def import_wandb() -> Any:
|
||||
"""Import the wandb python package and raise an error if it is not installed."""
|
||||
try:
|
||||
import wandb # noqa: F401
|
||||
except ImportError:
|
||||
|
||||
@@ -18,6 +18,16 @@ def import_langkit(
|
||||
toxicity: bool = False,
|
||||
themes: bool = False,
|
||||
) -> Any:
|
||||
"""Import the langkit python package and raise an error if it is not installed.
|
||||
|
||||
Args:
|
||||
sentiment: Whether to import the langkit.sentiment module. Defaults to False.
|
||||
toxicity: Whether to import the langkit.toxicity module. Defaults to False.
|
||||
themes: Whether to import the langkit.themes module. Defaults to False.
|
||||
|
||||
Returns:
|
||||
The imported langkit module.
|
||||
"""
|
||||
try:
|
||||
import langkit # noqa: F401
|
||||
import langkit.regexes # noqa: F401
|
||||
|
||||
@@ -18,6 +18,14 @@ INTERMEDIATE_STEPS_KEY = "intermediate_steps"
|
||||
|
||||
|
||||
def extract_cypher(text: str) -> str:
|
||||
"""
|
||||
Extract Cypher code from a text.
|
||||
Args:
|
||||
text: Text to extract Cypher code from.
|
||||
|
||||
Returns:
|
||||
Cypher code extracted from the text.
|
||||
"""
|
||||
# The pattern to find Cypher code enclosed in triple backticks
|
||||
pattern = r"```(.*?)```"
|
||||
|
||||
|
||||
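The extraction pattern above is just a non-greedy match between triple backticks. A quick illustration of the same idea (the backticks in the regex are written with a quantifier, and `re.DOTALL` is added so multi-line code blocks match; the exact flags used by `extract_cypher` are not shown in this hunk):

```python
import re

# Same idea as extract_cypher: grab whatever sits between triple backticks.
pattern = r"`{3}(.*?)`{3}"

fence = "`" * 3
text = "Here is the query:\n" + fence + "\nMATCH (n) RETURN n LIMIT 5\n" + fence
matches = re.findall(pattern, text, flags=re.DOTALL)
print(matches[0].strip())  # -> MATCH (n) RETURN n LIMIT 5
```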
@@ -34,6 +34,8 @@ black_listed_elements: Set[str] = {
|
||||
|
||||
|
||||
class ElementInViewPort(TypedDict):
|
||||
"""A typed dictionary containing information about elements in the viewport."""
|
||||
|
||||
node_index: str
|
||||
backend_node_id: int
|
||||
node_name: Optional[str]
|
||||
@@ -51,7 +53,7 @@ class Crawler:
|
||||
try:
|
||||
from playwright.sync_api import sync_playwright
|
||||
except ImportError:
|
||||
raise ValueError(
|
||||
raise ImportError(
|
||||
"Could not import playwright python package. "
|
||||
"Please install it with `pip install playwright`."
|
||||
)
|
||||
|
||||
@@ -64,6 +64,14 @@ class QuestionAnswer(BaseModel):
|
||||
|
||||
|
||||
def create_citation_fuzzy_match_chain(llm: BaseLanguageModel) -> LLMChain:
|
||||
"""Create a citation fuzzy match chain.
|
||||
|
||||
Args:
|
||||
llm: Language model to use for the chain.
|
||||
|
||||
Returns:
|
||||
Chain (LLMChain) that can be used to answer questions with citations.
|
||||
"""
|
||||
output_parser = PydanticOutputFunctionsParser(pydantic_schema=QuestionAnswer)
|
||||
schema = QuestionAnswer.schema()
|
||||
function = {
|
||||
|
||||
@@ -40,6 +40,15 @@ Passage:
|
||||
|
||||
|
||||
def create_extraction_chain(schema: dict, llm: BaseLanguageModel) -> Chain:
|
||||
"""Creates a chain that extracts information from a passage.
|
||||
|
||||
Args:
|
||||
schema: The schema of the entities to extract.
|
||||
llm: The language model to use.
|
||||
|
||||
Returns:
|
||||
Chain that can be used to extract information from a passage.
|
||||
"""
|
||||
function = _get_extraction_function(schema)
|
||||
prompt = ChatPromptTemplate.from_template(_EXTRACTION_TEMPLATE)
|
||||
output_parser = JsonKeyOutputFunctionsParser(key_name="info")
|
||||
@@ -56,6 +65,16 @@ def create_extraction_chain(schema: dict, llm: BaseLanguageModel) -> Chain:
|
||||
def create_extraction_chain_pydantic(
|
||||
pydantic_schema: Any, llm: BaseLanguageModel
|
||||
) -> Chain:
|
||||
"""Creates a chain that extracts information from a passage using pydantic schema.
|
||||
|
||||
Args:
|
||||
pydantic_schema: The pydantic schema of the entities to extract.
|
||||
llm: The language model to use.
|
||||
|
||||
Returns:
|
||||
Chain that can be used to extract information from a passage.
|
||||
"""
|
||||
|
||||
class PydanticSchema(BaseModel):
|
||||
info: List[pydantic_schema] # type: ignore
|
||||
|
||||
|
||||
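A hedged usage sketch for the extraction helpers. The schema shape follows OpenAI function-calling JSON schema, the top-level import from `langchain.chains` is assumed, and passing the raw passage to `run` is inferred from the passage-style prompt above.

```python
from langchain.chains import create_extraction_chain
from langchain.chat_models import ChatOpenAI

# JSON-schema style description of the entities to pull out of the passage.
schema = {
    "properties": {
        "person_name": {"type": "string"},
        "person_height": {"type": "integer"},
    },
    "required": ["person_name"],
}

llm = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0)
chain = create_extraction_chain(schema, llm)

text = "Alex is 5 feet tall. Claudia is one foot taller than Alex."
print(chain.run(text))
```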
langchain/chains/openai_functions/openapi.py (new file, 291 lines)
@@ -0,0 +1,291 @@
|
||||
import json
|
||||
import re
|
||||
from collections import defaultdict
|
||||
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
|
||||
|
||||
import requests
|
||||
from openapi_schema_pydantic import Parameter
|
||||
from requests import Response
|
||||
|
||||
from langchain import BasePromptTemplate, LLMChain
|
||||
from langchain.base_language import BaseLanguageModel
|
||||
from langchain.callbacks.manager import CallbackManagerForChainRun
|
||||
from langchain.chains.base import Chain
|
||||
from langchain.chains.sequential import SequentialChain
|
||||
from langchain.chat_models import ChatOpenAI
|
||||
from langchain.input import get_colored_text
|
||||
from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser
|
||||
from langchain.prompts import ChatPromptTemplate
|
||||
from langchain.tools import APIOperation
|
||||
from langchain.utilities.openapi import OpenAPISpec
|
||||
|
||||
|
||||
def _get_description(o: Any, prefer_short: bool) -> Optional[str]:
|
||||
summary = getattr(o, "summary", None)
|
||||
description = getattr(o, "description", None)
|
||||
if prefer_short:
|
||||
return summary or description
|
||||
return description or summary
|
||||
|
||||
|
||||
def _format_url(url: str, path_params: dict) -> str:
|
||||
expected_path_param = re.findall(r"{(.*?)}", url)
|
||||
new_params = {}
|
||||
for param in expected_path_param:
|
||||
clean_param = param.lstrip(".;").rstrip("*")
|
||||
val = path_params[clean_param]
|
||||
if isinstance(val, list):
|
||||
if param[0] == ".":
|
||||
sep = "." if param[-1] == "*" else ","
|
||||
new_val = "." + sep.join(val)
|
||||
elif param[0] == ";":
|
||||
sep = f"{clean_param}=" if param[-1] == "*" else ","
|
||||
new_val = f"{clean_param}=" + sep.join(val)
|
||||
else:
|
||||
new_val = ",".join(val)
|
||||
elif isinstance(val, dict):
|
||||
kv_sep = "=" if param[-1] == "*" else ","
|
||||
kv_strs = [kv_sep.join((k, v)) for k, v in val.items()]
|
||||
if param[0] == ".":
|
||||
sep = "."
|
||||
new_val = "."
|
||||
elif param[0] == ";":
|
||||
sep = ";"
|
||||
new_val = ";"
|
||||
else:
|
||||
sep = ","
|
||||
new_val = ""
|
||||
new_val += sep.join(kv_strs)
|
||||
else:
|
||||
if param[0] == ".":
|
||||
new_val = f".{val}"
|
||||
elif param[0] == ";":
|
||||
new_val = f";{clean_param}={val}"
|
||||
else:
|
||||
new_val = val
|
||||
new_params[param] = new_val
|
||||
return url.format(**new_params)
|
||||
|
||||
|
||||
def _openapi_params_to_json_schema(params: List[Parameter], spec: OpenAPISpec) -> dict:
|
||||
properties = {}
|
||||
required = []
|
||||
for p in params:
|
||||
if p.param_schema:
|
||||
schema = spec.get_schema(p.param_schema)
|
||||
else:
|
||||
media_type_schema = list(p.content.values())[0].media_type_schema # type: ignore # noqa: E501
|
||||
schema = spec.get_schema(media_type_schema)
|
||||
if p.description and not schema.description:
|
||||
schema.description = p.description
|
||||
properties[p.name] = json.loads(schema.json(exclude_none=True))
|
||||
if p.required:
|
||||
required.append(p.name)
|
||||
return {"type": "object", "properties": properties, "required": required}
|
||||
|
||||
|
||||
def openapi_spec_to_openai_fn(
|
||||
spec: OpenAPISpec,
|
||||
) -> Tuple[List[Dict[str, Any]], Callable]:
|
||||
"""Convert a valid OpenAPI spec to the JSON Schema format expected for OpenAI
|
||||
functions.
|
||||
|
||||
Args:
|
||||
spec: OpenAPI spec to convert.
|
||||
|
||||
Returns:
|
||||
Tuple of the OpenAI functions JSON schema and a default function for executing
|
||||
a request based on the OpenAI function schema.
|
||||
"""
|
||||
if not spec.paths:
|
||||
return [], lambda: None
|
||||
functions = []
|
||||
_name_to_call_map = {}
|
||||
for path in spec.paths:
|
||||
path_params = {
|
||||
(p.name, p.param_in): p for p in spec.get_parameters_for_path(path)
|
||||
}
|
||||
for method in spec.get_methods_for_path(path):
|
||||
request_args = {}
|
||||
op = spec.get_operation(path, method)
|
||||
op_params = path_params.copy()
|
||||
for param in spec.get_parameters_for_operation(op):
|
||||
op_params[(param.name, param.param_in)] = param
|
||||
params_by_type = defaultdict(list)
|
||||
for name_loc, p in op_params.items():
|
||||
params_by_type[name_loc[1]].append(p)
|
||||
param_loc_to_arg_name = {
|
||||
"query": "params",
|
||||
"header": "headers",
|
||||
"cookie": "cookies",
|
||||
"path": "path_params",
|
||||
}
|
||||
for param_loc, arg_name in param_loc_to_arg_name.items():
|
||||
if params_by_type[param_loc]:
|
||||
request_args[arg_name] = _openapi_params_to_json_schema(
|
||||
params_by_type[param_loc], spec
|
||||
)
|
||||
request_body = spec.get_request_body_for_operation(op)
|
||||
# TODO: Support more MIME types.
|
||||
if request_body and request_body.content:
|
||||
media_types = {}
|
||||
for media_type, media_type_object in request_body.content.items():
|
||||
if media_type_object.media_type_schema:
|
||||
schema = spec.get_schema(media_type_object.media_type_schema)
|
||||
media_types[media_type] = json.loads(
|
||||
schema.json(exclude_none=True)
|
||||
)
|
||||
if len(media_types) == 1:
|
||||
media_type, schema_dict = list(media_types.items())[0]
|
||||
key = "json" if media_type == "application/json" else "data"
|
||||
request_args[key] = schema_dict
|
||||
elif len(media_types) > 1:
|
||||
request_args["data"] = {"anyOf": list(media_types.values())}
|
||||
|
||||
api_op = APIOperation.from_openapi_spec(spec, path, method)
|
||||
fn = {
|
||||
"name": api_op.operation_id,
|
||||
"description": api_op.description,
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": request_args,
|
||||
},
|
||||
}
|
||||
functions.append(fn)
|
||||
_name_to_call_map[fn["name"]] = {
|
||||
"method": method,
|
||||
"url": api_op.base_url + api_op.path,
|
||||
}
|
||||
|
||||
def default_call_api(
|
||||
name: str,
|
||||
fn_args: dict,
|
||||
headers: Optional[dict] = None,
|
||||
params: Optional[dict] = None,
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
method = _name_to_call_map[name]["method"]
|
||||
url = _name_to_call_map[name]["url"]
|
||||
path_params = fn_args.pop("path_params", {})
|
||||
url = _format_url(url, path_params)
|
||||
if "data" in fn_args and isinstance(fn_args["data"], dict):
|
||||
fn_args["data"] = json.dumps(fn_args["data"])
|
||||
_kwargs = {**fn_args, **kwargs}
|
||||
if headers is not None:
|
||||
if "headers" in _kwargs:
|
||||
_kwargs["headers"].update(headers)
|
||||
else:
|
||||
_kwargs["headers"] = headers
|
||||
if params is not None:
|
||||
if "params" in _kwargs:
|
||||
_kwargs["params"].update(params)
|
||||
else:
|
||||
_kwargs["params"] = params
|
||||
return requests.request(method, url, **_kwargs)
|
||||
|
||||
return functions, default_call_api
|
||||
|
||||
|
||||
class SimpleRequestChain(Chain):
|
||||
request_method: Callable
|
||||
output_key: str = "response"
|
||||
input_key: str = "function"
|
||||
|
||||
@property
|
||||
def input_keys(self) -> List[str]:
|
||||
return [self.input_key]
|
||||
|
||||
@property
|
||||
def output_keys(self) -> List[str]:
|
||||
return [self.output_key]
|
||||
|
||||
def _call(
|
||||
self,
|
||||
inputs: Dict[str, Any],
|
||||
run_manager: Optional[CallbackManagerForChainRun] = None,
|
||||
) -> Dict[str, Any]:
|
||||
"""Run the logic of this chain and return the output."""
|
||||
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
|
||||
name = inputs["function"].pop("name")
|
||||
args = inputs["function"].pop("arguments")
|
||||
_pretty_name = get_colored_text(name, "green")
|
||||
_pretty_args = get_colored_text(json.dumps(args, indent=2), "green")
|
||||
_text = f"Calling endpoint {_pretty_name} with arguments:\n" + _pretty_args
|
||||
_run_manager.on_text(_text)
|
||||
api_response: Response = self.request_method(name, args)
|
||||
if api_response.status_code != 200:
|
||||
response = (
|
||||
f"{api_response.status_code}: {api_response.reason}"
|
||||
+ f"\nFor {name} "
|
||||
+ f"Called with args: {args['params']}"
|
||||
)
|
||||
else:
|
||||
try:
|
||||
response = api_response.json()
|
||||
except Exception: # noqa: E722
|
||||
response = api_response.text
|
||||
return {self.output_key: response}
|
||||
|
||||
|
||||
def get_openapi_chain(
|
||||
spec: Union[OpenAPISpec, str],
|
||||
llm: Optional[BaseLanguageModel] = None,
|
||||
prompt: Optional[BasePromptTemplate] = None,
|
||||
request_chain: Optional[Chain] = None,
|
||||
llm_kwargs: Optional[Dict] = None,
|
||||
verbose: bool = False,
|
||||
headers: Optional[Dict] = None,
|
||||
params: Optional[Dict] = None,
|
||||
**kwargs: Any,
|
||||
) -> SequentialChain:
|
||||
"""Create a chain for querying an API from a OpenAPI spec.
|
||||
|
||||
Args:
|
||||
spec: OpenAPISpec or url/file/text string corresponding to one.
|
||||
llm: language model, should be an OpenAI function-calling model, e.g.
|
||||
`ChatOpenAI(model="gpt-3.5-turbo-0613")`.
|
||||
prompt: Main prompt template to use.
|
||||
request_chain: Chain for taking the functions output and executing the request.
|
||||
"""
|
||||
if isinstance(spec, str):
|
||||
for conversion in (
|
||||
OpenAPISpec.from_url,
|
||||
OpenAPISpec.from_file,
|
||||
OpenAPISpec.from_text,
|
||||
):
|
||||
try:
|
||||
spec = conversion(spec) # type: ignore[arg-type]
|
||||
break
|
||||
except Exception: # noqa: E722
|
||||
pass
|
||||
if isinstance(spec, str):
|
||||
raise ValueError(f"Unable to parse spec from source {spec}")
|
||||
openai_fns, call_api_fn = openapi_spec_to_openai_fn(spec)
|
||||
llm = llm or ChatOpenAI(
|
||||
model="gpt-3.5-turbo-0613",
|
||||
)
|
||||
prompt = prompt or ChatPromptTemplate.from_template(
|
||||
"Use the provided API's to respond to this user query:\n\n{query}"
|
||||
)
|
||||
llm_chain = LLMChain(
|
||||
llm=llm,
|
||||
prompt=prompt,
|
||||
llm_kwargs={"functions": openai_fns},
|
||||
output_parser=JsonOutputFunctionsParser(args_only=False),
|
||||
output_key="function",
|
||||
verbose=verbose,
|
||||
**(llm_kwargs or {}),
|
||||
)
|
||||
request_chain = request_chain or SimpleRequestChain(
|
||||
request_method=lambda name, args: call_api_fn(
|
||||
name, args, headers=headers, params=params
|
||||
),
|
||||
verbose=verbose,
|
||||
)
|
||||
return SequentialChain(
|
||||
chains=[llm_chain, request_chain],
|
||||
input_variables=llm_chain.input_keys,
|
||||
output_variables=["response"],
|
||||
verbose=verbose,
|
||||
**kwargs,
|
||||
)
|
||||
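For orientation, a minimal usage sketch of the chain assembled above; the import path, spec URL, and query are illustrative assumptions rather than part of this diff:

from langchain.chains.openai_functions.openapi import get_openapi_chain

# Hypothetical spec URL; any reachable OpenAPI spec (URL, file path, or raw text) works.
chain = get_openapi_chain("https://example.com/openapi.yaml")

# The LLM picks an endpoint via OpenAI function-calling, SimpleRequestChain executes
# the HTTP request, and the parsed response comes back under the "response" key.
result = chain.run("Which products match 'blue shirt'?")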
@@ -29,6 +29,18 @@ def create_qa_with_structure_chain(
    output_parser: str = "base",
    prompt: Optional[Union[PromptTemplate, ChatPromptTemplate]] = None,
) -> LLMChain:
    """Create a question answering chain that returns an answer with sources.

    Args:
        llm: Language model to use for the chain.
        schema: Pydantic schema to use for the output.
        output_parser: Output parser to use. Should be one of `pydantic` or `base`.
            Defaults to `base`.
        prompt: Optional prompt to use for the chain.

    Returns:
        Chain (LLMChain) that can be used to answer questions in the shape of the given schema.
    """
    if output_parser == "pydantic":
        if not (isinstance(schema, type) and issubclass(schema, BaseModel)):
            raise ValueError(
@@ -79,4 +91,13 @@ def create_qa_with_structure_chain(
def create_qa_with_sources_chain(llm: BaseLanguageModel, **kwargs: Any) -> LLMChain:
    """Create a question answering chain that returns an answer with sources.

    Args:
        llm: Language model to use for the chain.
        **kwargs: Keyword arguments to pass to `create_qa_with_structure_chain`.

    Returns:
        Chain (LLMChain) that can be used to answer questions with citations.
    """
    return create_qa_with_structure_chain(llm, AnswerWithSources, **kwargs)
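A hedged sketch of how this helper is typically wired into a documents chain; the StuffDocumentsChain setup and document prompt below are assumptions for illustration, not part of this diff:

from langchain.chains import StuffDocumentsChain
from langchain.chains.openai_functions import create_qa_with_sources_chain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate

llm = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0)
qa_chain = create_qa_with_sources_chain(llm)

# Render each retrieved document with its source so the model can cite it.
doc_prompt = PromptTemplate(
    template="Content: {page_content}\nSource: {source}",
    input_variables=["page_content", "source"],
)
final_qa_chain = StuffDocumentsChain(
    llm_chain=qa_chain,
    document_variable_name="context",
    document_prompt=doc_prompt,
)
# `docs` would be a list of Documents retrieved elsewhere, e.g.:
# final_qa_chain.run(input_documents=docs, question="What did the author say about X?")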
@@ -27,6 +27,15 @@ Passage:
def create_tagging_chain(schema: dict, llm: BaseLanguageModel) -> Chain:
    """Creates a chain that extracts information from a passage.

    Args:
        schema: The schema of the entities to extract.
        llm: The language model to use.

    Returns:
        Chain (LLMChain) that can be used to extract information from a passage.
    """
    function = _get_tagging_function(schema)
    prompt = ChatPromptTemplate.from_template(_TAGGING_TEMPLATE)
    output_parser = JsonOutputFunctionsParser()
@@ -43,6 +52,15 @@ def create_tagging_chain(schema: dict, llm: BaseLanguageModel) -> Chain:
def create_tagging_chain_pydantic(
    pydantic_schema: Any, llm: BaseLanguageModel
) -> Chain:
    """Creates a chain that extracts information from a passage.

    Args:
        pydantic_schema: The pydantic schema of the entities to extract.
        llm: The language model to use.

    Returns:
        Chain (LLMChain) that can be used to extract information from a passage.
    """
    openai_schema = pydantic_schema.schema()
    function = _get_tagging_function(openai_schema)
    prompt = ChatPromptTemplate.from_template(_TAGGING_TEMPLATE)
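For context, a short usage sketch of create_tagging_chain; the schema and input text are illustrative:

from langchain.chains import create_tagging_chain
from langchain.chat_models import ChatOpenAI

schema = {
    "properties": {
        "sentiment": {"type": "string"},
        "language": {"type": "string"},
    }
}
llm = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0)
chain = create_tagging_chain(schema, llm)

# Returns a dict shaped by the schema, e.g. {"sentiment": "positive", "language": "Spanish"}.
chain.run("Estoy increiblemente contento de haberte conocido!")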
@@ -29,4 +29,12 @@ def _convert_schema(schema: dict) -> dict:
def get_llm_kwargs(function: dict) -> dict:
    """Returns the kwargs for the LLMChain constructor.

    Args:
        function: The function to use.

    Returns:
        The kwargs for the LLMChain constructor.
    """
    return {"functions": [function], "function_call": {"name": function["name"]}}
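Concretely, given a function definition dict, this helper just pins the model to that single function; a tiny sketch (the function dict itself is illustrative):

function = {
    "name": "information_extraction",
    "description": "Extracts the relevant information from the passage.",
    "parameters": {"type": "object", "properties": {}},
}
assert get_llm_kwargs(function) == {
    "functions": [function],
    "function_call": {"name": "information_extraction"},
}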
@@ -31,8 +31,24 @@ class ConditionalPromptSelector(BasePromptSelector):
def is_llm(llm: BaseLanguageModel) -> bool:
    """Check if the language model is an LLM.

    Args:
        llm: Language model to check.

    Returns:
        True if the language model is a BaseLLM model, False otherwise.
    """
    return isinstance(llm, BaseLLM)


def is_chat_model(llm: BaseLanguageModel) -> bool:
    """Check if the language model is a chat model.

    Args:
        llm: Language model to check.

    Returns:
        True if the language model is a BaseChatModel model, False otherwise.
    """
    return isinstance(llm, BaseChatModel)
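These predicates are typically used as conditions in a ConditionalPromptSelector; a sketch under the assumption of a chat model and two placeholder prompts:

from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.prompts.chat import ChatPromptTemplate

DEFAULT_PROMPT = PromptTemplate.from_template("Summarize this text:\n\n{text}")
CHAT_PROMPT = ChatPromptTemplate.from_template("Summarize this text:\n\n{text}")

selector = ConditionalPromptSelector(
    default_prompt=DEFAULT_PROMPT,
    conditionals=[(is_chat_model, CHAT_PROMPT)],
)

llm = ChatOpenAI()
prompt = selector.get_prompt(llm)  # CHAT_PROMPT here, DEFAULT_PROMPT for plain LLMs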
@@ -123,6 +123,22 @@ def load_query_constructor_chain(
    enable_limit: bool = False,
    **kwargs: Any,
) -> LLMChain:
    """
    Load a query constructor chain.

    Args:
        llm: BaseLanguageModel to use for the chain.
        document_contents: The contents of the document to be queried.
        attribute_info: A list of AttributeInfo objects describing
            the attributes of the document.
        examples: Optional list of examples to use for the chain.
        allowed_comparators: An optional list of allowed comparators.
        allowed_operators: An optional list of allowed operators.
        enable_limit: Whether to enable the limit operator. Defaults to False.
        **kwargs:

    Returns:
        An LLMChain that can be used to construct queries.
    """
    prompt = _get_prompt(
        document_contents,
        attribute_info,
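A hedged example of loading this chain for a small movie collection; the attribute descriptions and query are illustrative:

from langchain.chains.query_constructor.base import load_query_constructor_chain
from langchain.chains.query_constructor.schema import AttributeInfo
from langchain.chat_models import ChatOpenAI

attribute_info = [
    AttributeInfo(name="genre", description="The genre of the movie", type="string"),
    AttributeInfo(name="year", description="The year the movie was released", type="integer"),
]
chain = load_query_constructor_chain(
    llm=ChatOpenAI(temperature=0),
    document_contents="Brief summary of a movie",
    attribute_info=attribute_info,
    enable_limit=True,
)

# Yields a StructuredQuery whose filter might be, e.g., eq("genre", "science fiction").
chain.run("I want to watch a science fiction movie from the 1990s")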
@@ -60,12 +60,16 @@ class Expr(BaseModel):
class Operator(str, Enum):
    """Enumerator of the operations."""

    AND = "and"
    OR = "or"
    NOT = "not"


class Comparator(str, Enum):
    """Enumerator of the comparison operators."""

    EQ = "eq"
    GT = "gt"
    GTE = "gte"
@@ -57,6 +57,9 @@ GRAMMAR = """
@v_args(inline=True)
class QueryTransformer(Transformer):
    """Transforms a query string into an IR representation
    (intermediate representation)."""

    def __init__(
        self,
        *args: Any,
@@ -136,6 +139,16 @@ def get_parser(
    allowed_comparators: Optional[Sequence[Comparator]] = None,
    allowed_operators: Optional[Sequence[Operator]] = None,
) -> Lark:
    """
    Returns a parser for the query language.

    Args:
        allowed_comparators: Optional[Sequence[Comparator]]
        allowed_operators: Optional[Sequence[Operator]]

    Returns:
        Lark parser for the query language.
    """
    transformer = QueryTransformer(
        allowed_comparators=allowed_comparators, allowed_operators=allowed_operators
    )
@@ -1,5 +1,6 @@
from langchain.chat_models.anthropic import ChatAnthropic
from langchain.chat_models.azure_openai import AzureChatOpenAI
from langchain.chat_models.fake import FakeListChatModel
from langchain.chat_models.google_palm import ChatGooglePalm
from langchain.chat_models.openai import ChatOpenAI
from langchain.chat_models.promptlayer_openai import PromptLayerChatOpenAI
@@ -8,6 +9,7 @@ from langchain.chat_models.vertexai import ChatVertexAI
__all__ = [
    "ChatOpenAI",
    "AzureChatOpenAI",
    "FakeListChatModel",
    "PromptLayerChatOpenAI",
    "ChatAnthropic",
    "ChatGooglePalm",
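The newly exported FakeListChatModel is handy as a deterministic stand-in in tests; a small sketch (the canned responses are illustrative):

from langchain.chat_models import FakeListChatModel

fake = FakeListChatModel(responses=["first canned reply", "second canned reply"])
fake.predict("anything")       # -> "first canned reply"
fake.predict("anything else")  # -> "second canned reply"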
@@ -17,7 +17,7 @@ from langchain.callbacks.manager import (
    CallbackManagerForLLMRun,
    Callbacks,
)
from langchain.load.dump import dumpd
from langchain.load.dump import dumpd, dumps
from langchain.schema import (
    AIMessage,
    BaseMessage,
@@ -35,6 +35,7 @@ def _get_verbosity() -> bool:
class BaseChatModel(BaseLanguageModel, ABC):
    cache: Optional[bool] = None
    verbose: bool = Field(default_factory=_get_verbosity)
    """Whether to print out response text."""
    callbacks: Callbacks = Field(default=None, exclude=True)
@@ -61,6 +62,25 @@ class BaseChatModel(BaseLanguageModel, ABC):
    def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
        return {}

    def _get_invocation_params(
        self,
        stop: Optional[List[str]] = None,
    ) -> dict:
        params = self.dict()
        params["stop"] = stop
        return params

    def _get_llm_string(self, stop: Optional[List[str]] = None, **kwargs: Any) -> str:
        if self.lc_serializable:
            params = {**kwargs, **{"stop": stop}}
            param_string = str(sorted([(k, v) for k, v in params.items()]))
            llm_string = dumps(self)
            return llm_string + "---" + param_string
        else:
            params = self._get_invocation_params(stop=stop)
            params = {**params, **kwargs}
            return str(sorted([(k, v) for k, v in params.items()]))

    def generate(
        self,
        messages: List[List[BaseMessage]],
@@ -71,9 +91,7 @@ class BaseChatModel(BaseLanguageModel, ABC):
        **kwargs: Any,
    ) -> LLMResult:
        """Top Level call"""

        params = self.dict()
        params["stop"] = stop
        params = self._get_invocation_params(stop=stop)
        options = {"stop": stop}

        callback_manager = CallbackManager.configure(
@@ -83,29 +101,37 @@ class BaseChatModel(BaseLanguageModel, ABC):
            tags,
            self.tags,
        )
        run_manager = callback_manager.on_chat_model_start(
        run_managers = callback_manager.on_chat_model_start(
            dumpd(self), messages, invocation_params=params, options=options
        )

        new_arg_supported = inspect.signature(self._generate).parameters.get(
            "run_manager"
        )
        try:
            results = [
                self._generate(m, stop=stop, run_manager=run_manager, **kwargs)
                if new_arg_supported
                else self._generate(m, stop=stop)
                for m in messages
            ]
        except (KeyboardInterrupt, Exception) as e:
            run_manager.on_llm_error(e)
            raise e
        results = []
        for i, m in enumerate(messages):
            try:
                results.append(
                    self._generate_with_cache(
                        m,
                        stop=stop,
                        run_manager=run_managers[i] if run_managers else None,
                        **kwargs,
                    )
                )
            except (KeyboardInterrupt, Exception) as e:
                if run_managers:
                    run_managers[i].on_llm_error(e)
                raise e
        flattened_outputs = [
            LLMResult(generations=[res.generations], llm_output=res.llm_output)
            for res in results
        ]
        llm_output = self._combine_llm_outputs([res.llm_output for res in results])
        generations = [res.generations for res in results]
        output = LLMResult(generations=generations, llm_output=llm_output)
        run_manager.on_llm_end(output)
        if run_manager:
            output.run = RunInfo(run_id=run_manager.run_id)
        if run_managers:
            run_infos = []
            for manager, flattened_output in zip(run_managers, flattened_outputs):
                manager.on_llm_end(flattened_output)
                run_infos.append(RunInfo(run_id=manager.run_id))
            output.run = run_infos
        return output

    async def agenerate(
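As the hunk above shows, generate() now routes each prompt through the cache-aware path and records one run per prompt; a sketch of the observable difference (the messages are illustrative):

from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage

chat = ChatOpenAI()
result = chat.generate(
    [[HumanMessage(content="Hello")], [HumanMessage(content="Tell me a joke")]]
)

# result.run is now a list with one RunInfo per input message list,
# rather than a single RunInfo for the whole batch.
print(result.run)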
@@ -118,8 +144,7 @@ class BaseChatModel(BaseLanguageModel, ABC):
        **kwargs: Any,
    ) -> LLMResult:
        """Top Level call"""
        params = self.dict()
        params["stop"] = stop
        params = self._get_invocation_params(stop=stop)
        options = {"stop": stop}

        callback_manager = AsyncCallbackManager.configure(
@@ -129,31 +154,62 @@ class BaseChatModel(BaseLanguageModel, ABC):
            tags,
            self.tags,
        )
        run_manager = await callback_manager.on_chat_model_start(

        run_managers = await callback_manager.on_chat_model_start(
            dumpd(self), messages, invocation_params=params, options=options
        )

        new_arg_supported = inspect.signature(self._agenerate).parameters.get(
            "run_manager"
        results = await asyncio.gather(
            *[
                self._agenerate_with_cache(
                    m,
                    stop=stop,
                    run_manager=run_managers[i] if run_managers else None,
                    **kwargs,
                )
                for i, m in enumerate(messages)
            ],
            return_exceptions=True,
        )
        try:
            results = await asyncio.gather(
                *[
                    self._agenerate(m, stop=stop, run_manager=run_manager, **kwargs)
                    if new_arg_supported
                    else self._agenerate(m, stop=stop)
                    for m in messages
                ]
            )
        except (KeyboardInterrupt, Exception) as e:
            await run_manager.on_llm_error(e)
            raise e
        exceptions = []
        for i, res in enumerate(results):
            if isinstance(res, Exception):
                if run_managers:
                    await run_managers[i].on_llm_error(res)
                exceptions.append(res)
        if exceptions:
            if run_managers:
                await asyncio.gather(
                    *[
                        run_manager.on_llm_end(
                            LLMResult(
                                generations=[res.generations], llm_output=res.llm_output
                            )
                        )
                        for run_manager, res in zip(run_managers, results)
                        if not isinstance(res, Exception)
                    ]
                )
            raise exceptions[0]
        flattened_outputs = [
            LLMResult(generations=[res.generations], llm_output=res.llm_output)
            for res in results
        ]
        llm_output = self._combine_llm_outputs([res.llm_output for res in results])
        generations = [res.generations for res in results]
        output = LLMResult(generations=generations, llm_output=llm_output)
        await run_manager.on_llm_end(output)
        if run_manager:
            output.run = RunInfo(run_id=run_manager.run_id)
        await asyncio.gather(
            *[
                run_manager.on_llm_end(flattened_output)
                for run_manager, flattened_output in zip(
                    run_managers, flattened_outputs
                )
            ]
        )
        if run_managers:
            output.run = [
                RunInfo(run_id=run_manager.run_id) for run_manager in run_managers
            ]
        return output

    def generate_prompt(
@@ -178,6 +234,84 @@ class BaseChatModel(BaseLanguageModel, ABC):
            prompt_messages, stop=stop, callbacks=callbacks, **kwargs
        )

    def _generate_with_cache(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        new_arg_supported = inspect.signature(self._generate).parameters.get(
            "run_manager"
        )
        disregard_cache = self.cache is not None and not self.cache
        if langchain.llm_cache is None or disregard_cache:
            # This happens when langchain.cache is None, but self.cache is True
            if self.cache is not None and self.cache:
                raise ValueError(
                    "Asked to cache, but no cache found at `langchain.cache`."
                )
            if new_arg_supported:
                return self._generate(
                    messages, stop=stop, run_manager=run_manager, **kwargs
                )
            else:
                return self._generate(messages, stop=stop, **kwargs)
        else:
            llm_string = self._get_llm_string(stop=stop, **kwargs)
            prompt = dumps(messages)
            cache_val = langchain.llm_cache.lookup(prompt, llm_string)
            if isinstance(cache_val, list):
                return ChatResult(generations=cache_val)
            else:
                if new_arg_supported:
                    result = self._generate(
                        messages, stop=stop, run_manager=run_manager, **kwargs
                    )
                else:
                    result = self._generate(messages, stop=stop, **kwargs)
                langchain.llm_cache.update(prompt, llm_string, result.generations)
                return result

    async def _agenerate_with_cache(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        new_arg_supported = inspect.signature(self._agenerate).parameters.get(
            "run_manager"
        )
        disregard_cache = self.cache is not None and not self.cache
        if langchain.llm_cache is None or disregard_cache:
            # This happens when langchain.cache is None, but self.cache is True
            if self.cache is not None and self.cache:
                raise ValueError(
                    "Asked to cache, but no cache found at `langchain.cache`."
                )
            if new_arg_supported:
                return await self._agenerate(
                    messages, stop=stop, run_manager=run_manager, **kwargs
                )
            else:
                return await self._agenerate(messages, stop=stop, **kwargs)
        else:
            llm_string = self._get_llm_string(stop=stop, **kwargs)
            prompt = dumps(messages)
            cache_val = langchain.llm_cache.lookup(prompt, llm_string)
            if isinstance(cache_val, list):
                return ChatResult(generations=cache_val)
            else:
                if new_arg_supported:
                    result = await self._agenerate(
                        messages, stop=stop, run_manager=run_manager, **kwargs
                    )
                else:
                    result = await self._agenerate(messages, stop=stop, **kwargs)
                langchain.llm_cache.update(prompt, llm_string, result.generations)
                return result
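Together with the new cache attribute, these methods let chat model calls go through langchain.llm_cache; a minimal usage sketch (the cache backend and prompts are illustrative):

import langchain
from langchain.cache import InMemoryCache
from langchain.chat_models import ChatOpenAI

langchain.llm_cache = InMemoryCache()
llm = ChatOpenAI(model="gpt-3.5-turbo-0613")

llm.predict("Tell me a joke")  # first call hits the API and populates the cache
llm.predict("Tell me a joke")  # identical call is served from the cache

# Per-instance opt-out: cache=False skips the global cache for this model only.
uncached_llm = ChatOpenAI(model="gpt-3.5-turbo-0613", cache=False)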
    @abstractmethod
    def _generate(
        self,