Mirror of https://github.com/hwchase17/langchain.git (synced 2026-02-06 09:10:27 +00:00)
Compare commits: bagatur/rf...erick/test (222 commits)
41 .github/CONTRIBUTING.md vendored
@@ -3,43 +3,4 @@
Hi there! Thank you for even being interested in contributing to LangChain.
As an open-source project in a rapidly developing field, we are extremely open to contributions, whether they involve new features, improved infrastructure, better documentation, or bug fixes.

To learn about how to contribute, please follow the [guides here](https://python.langchain.com/docs/contributing/)

## 🗺️ Guidelines

### 👩‍💻 Ways to contribute

There are many ways to contribute to LangChain. Here are some common ways people contribute:

- [**Documentation**](https://python.langchain.com/docs/contributing/documentation): Help improve our docs, including this one!
- [**Code**](https://python.langchain.com/docs/contributing/code): Help us write code, fix bugs, or improve our infrastructure.
- [**Integrations**](https://python.langchain.com/docs/contributing/integrations): Help us integrate with your favorite vendors and tools.

### 🚩 GitHub Issues

Our [issues](https://github.com/langchain-ai/langchain/issues) page is kept up to date with bugs, improvements, and feature requests.

There is a taxonomy of labels to help with sorting and discovery of issues of interest. Please use these to help organize issues.

If you start working on an issue, please assign it to yourself.

If you are adding an issue, please try to keep it focused on a single, modular bug/improvement/feature.
If two issues are related, or blocking, please link them rather than combining them.

We will try to keep these issues as up-to-date as possible, though
with the rapid rate of development in this field some may get out of date.
If you notice this happening, please let us know.

### 🙋 Getting Help

Our goal is to have the simplest developer setup possible. Should you experience any difficulty getting setup, please
contact a maintainer! Not only do we want to help get you unblocked, but we also want to make sure that the process is
smooth for future contributors.

In a similar vein, we do enforce certain linting, formatting, and documentation standards in the codebase.
If you are finding these difficult (or even just annoying) to work with, feel free to contact a maintainer for help -
we do not want these to get in the way of getting good code into the codebase.

### Contributor Documentation

To learn about how to contribute, please follow the [guides here](https://python.langchain.com/docs/contributing/)
To learn how to contribute to LangChain, please follow the [contribution guide here](https://python.langchain.com/docs/contributing/).
33 .github/PULL_REQUEST_TEMPLATE.md vendored
@@ -1,20 +1,29 @@
<!-- Thank you for contributing to LangChain!
Thank you for contributing to LangChain!

Please title your PR "<package>: <description>", where <package> is whichever of langchain, community, core, experimental, etc. is being modified.
- [ ] **PR title**: "package: description"
  - Where "package" is whichever of langchain, community, core, experimental, etc. is being modified. Use "docs: ..." for purely docs changes, "templates: ..." for template changes, "infra: ..." for CI changes.
  - Example: "community: add foobar LLM"

Replace this entire comment with:
- **Description:** a description of the change,
- **Issue:** the issue # it fixes if applicable,
- **Dependencies:** any dependencies required for this change,
- **Twitter handle:** we announce bigger features on Twitter. If your PR gets announced, and you'd like a mention, we'll gladly shout you out!

Please make sure your PR is passing linting and testing before submitting. Run `make format`, `make lint` and `make test` from the root of the package you've modified to check this locally.
- [ ] **PR message**: ***Delete this entire checklist*** and replace with
  - **Description:** a description of the change
  - **Issue:** the issue # it fixes, if applicable
  - **Dependencies:** any dependencies required for this change
  - **Twitter handle:** if your PR gets announced, and you'd like a mention, we'll gladly shout you out!

See contribution guidelines for more information on how to write/run tests, lint, etc: https://python.langchain.com/docs/contributing/

If you're adding a new integration, please include:
- [ ] **Add tests and docs**: If you're adding a new integration, please include
  1. a test for the integration, preferably unit tests that do not rely on network access,
  2. an example notebook showing its use. It lives in the `docs/docs/integrations` directory.

If no one reviews your PR within a few days, please @-mention one of @baskaryan, @eyurtsev, @hwchase17.
-->

- [ ] **Lint and test**: Run `make format`, `make lint` and `make test` from the root of the package(s) you've modified. See contribution guidelines for more: https://python.langchain.com/docs/contributing/

Additional guidelines:
- Make sure optional dependencies are imported within a function.
- Please do not add dependencies to pyproject.toml files (even optional ones) unless they are required for unit tests.
- Most PRs should not touch more than one package.
- Changes should be backwards compatible.
- If you are adding something to community, do not re-import it in langchain.

If no one reviews your PR within a few days, please @-mention one of baskaryan, efriis, eyurtsev, hwchase17.
7 .github/actions/people/Dockerfile vendored Normal file
@@ -0,0 +1,7 @@
FROM python:3.9

RUN pip install httpx PyGithub "pydantic==2.0.2" pydantic-settings "pyyaml>=5.3.1,<6.0.0"

COPY ./app /app

CMD ["python", "/app/main.py"]
11 .github/actions/people/action.yml vendored Normal file
@@ -0,0 +1,11 @@
# Adapted from https://github.com/tiangolo/fastapi/blob/master/.github/actions/people/action.yml
name: "Generate LangChain People"
description: "Generate the data for the LangChain People page"
author: "Jacob Lee <jacob@langchain.dev>"
inputs:
  token:
    description: 'User token, to read the GitHub API. Can be passed in using {{ secrets.LANGCHAIN_PEOPLE_GITHUB_TOKEN }}'
    required: true
runs:
  using: 'docker'
  image: 'Dockerfile'
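The script this action runs (the next file) walks every merged PR in the repository using GraphQL cursor pagination: each request returns up to 100 edges, and the cursor of the last edge seeds the following request until a page comes back empty. A minimal, self-contained sketch of that loop, with a simulated `fetch_page` standing in for the real GraphQL call in `main.py`:

    # Simulated pages standing in for the GraphQL responses in main.py
    # (each edge carries a cursor and a node, like the real pullRequests query).
    _FAKE_EDGES = [{"cursor": str(i), "node": {"number": i}} for i in range(250)]

    def fetch_page(after=None):
        # Hypothetical stand-in: real code POSTs to the GraphQL endpoint.
        start = 0 if after is None else int(after) + 1
        return _FAKE_EDGES[start : start + 100]  # pages of up to 100 edges

    def fetch_all_nodes():
        nodes = []
        edges = fetch_page()  # first page: no cursor
        while edges:
            nodes.extend(edge["node"] for edge in edges)
            edges = fetch_page(after=edges[-1]["cursor"])  # resume after the last edge
        return nodes

    assert len(fetch_all_nodes()) == 250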
641 .github/actions/people/app/main.py vendored Normal file
@@ -0,0 +1,641 @@
# Adapted from https://github.com/tiangolo/fastapi/blob/master/.github/actions/people/app/main.py

import logging
import subprocess
import sys
from collections import Counter
from datetime import datetime, timedelta, timezone
from pathlib import Path
from typing import Any, Container, Dict, List, Set, Union

import httpx
import yaml
from github import Github
from pydantic import BaseModel, SecretStr
from pydantic_settings import BaseSettings

github_graphql_url = "https://api.github.com/graphql"
questions_category_id = "DIC_kwDOIPDwls4CS6Ve"

# discussions_query = """
# query Q($after: String, $category_id: ID) {
#   repository(name: "langchain", owner: "langchain-ai") {
#     discussions(first: 100, after: $after, categoryId: $category_id) {
#       edges {
#         cursor
#         node {
#           number
#           author {
#             login
#             avatarUrl
#             url
#           }
#           title
#           createdAt
#           comments(first: 100) {
#             nodes {
#               createdAt
#               author {
#                 login
#                 avatarUrl
#                 url
#               }
#               isAnswer
#               replies(first: 10) {
#                 nodes {
#                   createdAt
#                   author {
#                     login
#                     avatarUrl
#                     url
#                   }
#                 }
#               }
#             }
#           }
#         }
#       }
#     }
#   }
# }
# """

# issues_query = """
# query Q($after: String) {
#   repository(name: "langchain", owner: "langchain-ai") {
#     issues(first: 100, after: $after) {
#       edges {
#         cursor
#         node {
#           number
#           author {
#             login
#             avatarUrl
#             url
#           }
#           title
#           createdAt
#           state
#           comments(first: 100) {
#             nodes {
#               createdAt
#               author {
#                 login
#                 avatarUrl
#                 url
#               }
#             }
#           }
#         }
#       }
#     }
#   }
# }
# """

prs_query = """
query Q($after: String) {
  repository(name: "langchain", owner: "langchain-ai") {
    pullRequests(first: 100, after: $after, states: MERGED) {
      edges {
        cursor
        node {
          changedFiles
          additions
          deletions
          number
          labels(first: 100) {
            nodes {
              name
            }
          }
          author {
            login
            avatarUrl
            url
            ... on User {
              twitterUsername
            }
          }
          title
          createdAt
          state
          reviews(first:100) {
            nodes {
              author {
                login
                avatarUrl
                url
                ... on User {
                  twitterUsername
                }
              }
              state
            }
          }
        }
      }
    }
  }
}
"""


class Author(BaseModel):
    login: str
    avatarUrl: str
    url: str
    twitterUsername: Union[str, None] = None


# Issues and Discussions


class CommentsNode(BaseModel):
    createdAt: datetime
    author: Union[Author, None] = None


class Replies(BaseModel):
    nodes: List[CommentsNode]


class DiscussionsCommentsNode(CommentsNode):
    replies: Replies


class Comments(BaseModel):
    nodes: List[CommentsNode]


class DiscussionsComments(BaseModel):
    nodes: List[DiscussionsCommentsNode]


class IssuesNode(BaseModel):
    number: int
    author: Union[Author, None] = None
    title: str
    createdAt: datetime
    state: str
    comments: Comments


class DiscussionsNode(BaseModel):
    number: int
    author: Union[Author, None] = None
    title: str
    createdAt: datetime
    comments: DiscussionsComments


class IssuesEdge(BaseModel):
    cursor: str
    node: IssuesNode


class DiscussionsEdge(BaseModel):
    cursor: str
    node: DiscussionsNode


class Issues(BaseModel):
    edges: List[IssuesEdge]


class Discussions(BaseModel):
    edges: List[DiscussionsEdge]


class IssuesRepository(BaseModel):
    issues: Issues


class DiscussionsRepository(BaseModel):
    discussions: Discussions


class IssuesResponseData(BaseModel):
    repository: IssuesRepository


class DiscussionsResponseData(BaseModel):
    repository: DiscussionsRepository


class IssuesResponse(BaseModel):
    data: IssuesResponseData


class DiscussionsResponse(BaseModel):
    data: DiscussionsResponseData


# PRs


class LabelNode(BaseModel):
    name: str


class Labels(BaseModel):
    nodes: List[LabelNode]


class ReviewNode(BaseModel):
    author: Union[Author, None] = None
    state: str


class Reviews(BaseModel):
    nodes: List[ReviewNode]


class PullRequestNode(BaseModel):
    number: int
    labels: Labels
    author: Union[Author, None] = None
    changedFiles: int
    additions: int
    deletions: int
    title: str
    createdAt: datetime
    state: str
    reviews: Reviews
    # comments: Comments


class PullRequestEdge(BaseModel):
    cursor: str
    node: PullRequestNode


class PullRequests(BaseModel):
    edges: List[PullRequestEdge]


class PRsRepository(BaseModel):
    pullRequests: PullRequests


class PRsResponseData(BaseModel):
    repository: PRsRepository


class PRsResponse(BaseModel):
    data: PRsResponseData


class Settings(BaseSettings):
    input_token: SecretStr
    github_repository: str
    httpx_timeout: int = 30


def get_graphql_response(
    *,
    settings: Settings,
    query: str,
    after: Union[str, None] = None,
    category_id: Union[str, None] = None,
) -> Dict[str, Any]:
    headers = {"Authorization": f"token {settings.input_token.get_secret_value()}"}
    # category_id is only used by one query, but GraphQL allows unused variables, so
    # keep it here for simplicity
    variables = {"after": after, "category_id": category_id}
    response = httpx.post(
        github_graphql_url,
        headers=headers,
        timeout=settings.httpx_timeout,
        json={"query": query, "variables": variables, "operationName": "Q"},
    )
    if response.status_code != 200:
        logging.error(
            f"Response was not 200, after: {after}, category_id: {category_id}"
        )
        logging.error(response.text)
        raise RuntimeError(response.text)
    data = response.json()
    if "errors" in data:
        logging.error(f"Errors in response, after: {after}, category_id: {category_id}")
        logging.error(data["errors"])
        logging.error(response.text)
        raise RuntimeError(response.text)
    return data


# def get_graphql_issue_edges(*, settings: Settings, after: Union[str, None] = None):
#     data = get_graphql_response(settings=settings, query=issues_query, after=after)
#     graphql_response = IssuesResponse.model_validate(data)
#     return graphql_response.data.repository.issues.edges


# def get_graphql_question_discussion_edges(
#     *,
#     settings: Settings,
#     after: Union[str, None] = None,
# ):
#     data = get_graphql_response(
#         settings=settings,
#         query=discussions_query,
#         after=after,
#         category_id=questions_category_id,
#     )
#     graphql_response = DiscussionsResponse.model_validate(data)
#     return graphql_response.data.repository.discussions.edges


def get_graphql_pr_edges(*, settings: Settings, after: Union[str, None] = None):
    if after is None:
        print("Querying PRs...")
    else:
        print(f"Querying PRs with cursor {after}...")
    data = get_graphql_response(
        settings=settings,
        query=prs_query,
        after=after
    )
    graphql_response = PRsResponse.model_validate(data)
    return graphql_response.data.repository.pullRequests.edges


# def get_issues_experts(settings: Settings):
#     issue_nodes: List[IssuesNode] = []
#     issue_edges = get_graphql_issue_edges(settings=settings)

#     while issue_edges:
#         for edge in issue_edges:
#             issue_nodes.append(edge.node)
#         last_edge = issue_edges[-1]
#         issue_edges = get_graphql_issue_edges(settings=settings, after=last_edge.cursor)

#     commentors = Counter()
#     last_month_commentors = Counter()
#     authors: Dict[str, Author] = {}

#     now = datetime.now(tz=timezone.utc)
#     one_month_ago = now - timedelta(days=30)

#     for issue in issue_nodes:
#         issue_author_name = None
#         if issue.author:
#             authors[issue.author.login] = issue.author
#             issue_author_name = issue.author.login
#         issue_commentors = set()
#         for comment in issue.comments.nodes:
#             if comment.author:
#                 authors[comment.author.login] = comment.author
#                 if comment.author.login != issue_author_name:
#                     issue_commentors.add(comment.author.login)
#         for author_name in issue_commentors:
#             commentors[author_name] += 1
#             if issue.createdAt > one_month_ago:
#                 last_month_commentors[author_name] += 1

#     return commentors, last_month_commentors, authors


# def get_discussions_experts(settings: Settings):
#     discussion_nodes: List[DiscussionsNode] = []
#     discussion_edges = get_graphql_question_discussion_edges(settings=settings)

#     while discussion_edges:
#         for discussion_edge in discussion_edges:
#             discussion_nodes.append(discussion_edge.node)
#         last_edge = discussion_edges[-1]
#         discussion_edges = get_graphql_question_discussion_edges(
#             settings=settings, after=last_edge.cursor
#         )

#     commentors = Counter()
#     last_month_commentors = Counter()
#     authors: Dict[str, Author] = {}

#     now = datetime.now(tz=timezone.utc)
#     one_month_ago = now - timedelta(days=30)

#     for discussion in discussion_nodes:
#         discussion_author_name = None
#         if discussion.author:
#             authors[discussion.author.login] = discussion.author
#             discussion_author_name = discussion.author.login
#         discussion_commentors = set()
#         for comment in discussion.comments.nodes:
#             if comment.author:
#                 authors[comment.author.login] = comment.author
#                 if comment.author.login != discussion_author_name:
#                     discussion_commentors.add(comment.author.login)
#             for reply in comment.replies.nodes:
#                 if reply.author:
#                     authors[reply.author.login] = reply.author
#                     if reply.author.login != discussion_author_name:
#                         discussion_commentors.add(reply.author.login)
#         for author_name in discussion_commentors:
#             commentors[author_name] += 1
#             if discussion.createdAt > one_month_ago:
#                 last_month_commentors[author_name] += 1
#     return commentors, last_month_commentors, authors


# def get_experts(settings: Settings):
#     (
#         discussions_commentors,
#         discussions_last_month_commentors,
#         discussions_authors,
#     ) = get_discussions_experts(settings=settings)
#     commentors = discussions_commentors
#     last_month_commentors = discussions_last_month_commentors
#     authors = {**discussions_authors}
#     return commentors, last_month_commentors, authors


def _logistic(x, k):
    return x / (x + k)


def get_contributors(settings: Settings):
    pr_nodes: List[PullRequestNode] = []
    pr_edges = get_graphql_pr_edges(settings=settings)

    while pr_edges:
        for edge in pr_edges:
            pr_nodes.append(edge.node)
        last_edge = pr_edges[-1]
        pr_edges = get_graphql_pr_edges(settings=settings, after=last_edge.cursor)

    contributors = Counter()
    contributor_scores = Counter()
    recent_contributor_scores = Counter()
    reviewers = Counter()
    authors: Dict[str, Author] = {}

    for pr in pr_nodes:
        pr_reviewers: Set[str] = set()
        for review in pr.reviews.nodes:
            if review.author:
                authors[review.author.login] = review.author
                pr_reviewers.add(review.author.login)
        for reviewer in pr_reviewers:
            reviewers[reviewer] += 1
        if pr.author:
            authors[pr.author.login] = pr.author
            contributors[pr.author.login] += 1
            files_changed = pr.changedFiles
            lines_changed = pr.additions + pr.deletions
            score = _logistic(files_changed, 20) + _logistic(lines_changed, 100)
            contributor_scores[pr.author.login] += score
            three_months_ago = datetime.now(timezone.utc) - timedelta(days=3 * 30)
            if pr.createdAt > three_months_ago:
                recent_contributor_scores[pr.author.login] += score
    return contributors, contributor_scores, recent_contributor_scores, reviewers, authors


def get_top_users(
    *,
    counter: Counter,
    min_count: int,
    authors: Dict[str, Author],
    skip_users: Container[str],
):
    users = []
    for commentor, count in counter.most_common():
        if commentor in skip_users:
            continue
        if count >= min_count:
            author = authors[commentor]
            users.append(
                {
                    "login": commentor,
                    "count": count,
                    "avatarUrl": author.avatarUrl,
                    "twitterUsername": author.twitterUsername,
                    "url": author.url,
                }
            )
    return users


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    settings = Settings()
    logging.info(f"Using config: {settings.model_dump_json()}")
    g = Github(settings.input_token.get_secret_value())
    repo = g.get_repo(settings.github_repository)
    # question_commentors, question_last_month_commentors, question_authors = get_experts(
    #     settings=settings
    # )
    contributors, contributor_scores, recent_contributor_scores, reviewers, pr_authors = get_contributors(
        settings=settings
    )
    # authors = {**question_authors, **pr_authors}
    authors = {**pr_authors}
    maintainers_logins = {
        "hwchase17",
        "agola11",
        "baskaryan",
        "hinthornw",
        "nfcampos",
        "efriis",
        "eyurtsev",
        "rlancemartin",
    }
    hidden_logins = {
        "dev2049",
        "vowelparrot",
        "obi1kenobi",
        "langchain-infra",
        "jacoblee93",
        "dqbd",
        "bracesproul",
        "akira",
    }
    bot_names = {"dosubot", "github-actions", "CodiumAI-Agent"}
    maintainers = []
    for login in maintainers_logins:
        user = authors[login]
        maintainers.append(
            {
                "login": login,
                "count": contributors[login],  # + question_commentors[login],
                "avatarUrl": user.avatarUrl,
                "twitterUsername": user.twitterUsername,
                "url": user.url,
            }
        )

    # min_count_expert = 10
    # min_count_last_month = 3
    min_score_contributor = 1
    min_count_reviewer = 5
    skip_users = maintainers_logins | bot_names | hidden_logins
    # experts = get_top_users(
    #     counter=question_commentors,
    #     min_count=min_count_expert,
    #     authors=authors,
    #     skip_users=skip_users,
    # )
    # last_month_active = get_top_users(
    #     counter=question_last_month_commentors,
    #     min_count=min_count_last_month,
    #     authors=authors,
    #     skip_users=skip_users,
    # )
    top_recent_contributors = get_top_users(
        counter=recent_contributor_scores,
        min_count=min_score_contributor,
        authors=authors,
        skip_users=skip_users,
    )
    top_contributors = get_top_users(
        counter=contributor_scores,
        min_count=min_score_contributor,
        authors=authors,
        skip_users=skip_users,
    )
    top_reviewers = get_top_users(
        counter=reviewers,
        min_count=min_count_reviewer,
        authors=authors,
        skip_users=skip_users,
    )

    people = {
        "maintainers": maintainers,
        # "experts": experts,
        # "last_month_active": last_month_active,
        "top_recent_contributors": top_recent_contributors,
        "top_contributors": top_contributors,
        "top_reviewers": top_reviewers,
    }
    people_path = Path("./docs/data/people.yml")
    people_old_content = people_path.read_text(encoding="utf-8")
    new_people_content = yaml.dump(
        people, sort_keys=False, width=200, allow_unicode=True
    )
    if people_old_content == new_people_content:
        logging.info("The LangChain People data hasn't changed, finishing.")
        sys.exit(0)
    people_path.write_text(new_people_content, encoding="utf-8")
    logging.info("Setting up GitHub Actions git user")
    subprocess.run(["git", "config", "user.name", "github-actions"], check=True)
    subprocess.run(
        ["git", "config", "user.email", "github-actions@github.com"], check=True
    )
    branch_name = "langchain/langchain-people"
    logging.info(f"Creating a new branch {branch_name}")
    subprocess.run(["git", "checkout", "-B", branch_name], check=True)
    logging.info("Adding updated file")
    subprocess.run(["git", "add", str(people_path)], check=True)
    logging.info("Committing updated file")
    message = "👥 Update LangChain people data"
    result = subprocess.run(["git", "commit", "-m", message], check=True)
    logging.info("Pushing branch")
    subprocess.run(["git", "push", "origin", branch_name, "-f"], check=True)
    logging.info("Creating PR")
    pr = repo.create_pull(title=message, body=message, base="master", head=branch_name)
    logging.info(f"Created PR: {pr.number}")
    logging.info("Finished")
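A note on the ranking heuristic above: `get_contributors` scores each merged PR with the saturating function `_logistic(x, k) = x / (x + k)` applied to files changed (k = 20) and lines changed (k = 100), so one enormous PR cannot swamp many medium ones. A standalone illustration of how that score behaves; the formula is the one from `main.py`, while the sample PR sizes are made up:

    def logistic(x: float, k: float) -> float:
        # Saturating score: 0 at x=0, 0.5 at x=k, approaches 1 as x grows.
        return x / (x + k)

    def pr_score(files_changed: int, lines_changed: int) -> float:
        # Same weighting as get_contributors in main.py.
        return logistic(files_changed, 20) + logistic(lines_changed, 100)

    # A one-line doc fix and a 1000-line refactor differ by ~100x in size
    # but only by ~11x in score, which keeps the leaderboard balanced.
    print(round(pr_score(1, 10), 3))     # 0.139
    print(round(pr_score(30, 1000), 3))  # 1.509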
16 .github/workflows/_all_ci.yml vendored
@@ -108,3 +108,19 @@ jobs:
          # grep will exit non-zero if the target message isn't found,
          # and `set -e` above will cause the step to fail.
          echo "$STATUS" | grep 'nothing to commit, working tree clean'
  ci_end:
    name: "CI Success"
    needs: [lint, test, compile-integration-tests, dependencies, extended-tests]
    if: ${{ always() }}
    runs-on: ubuntu-latest
    steps:
      - name: "CI Success"
        if: ${{ !failure() }}
        run: |
          echo "Success"
          exit 0
      - name: "CI Failure"
        if: ${{ failure() }}
        run: |
          echo "Failure"
          exit 1
6 .github/workflows/_integration_test.yml vendored
@@ -52,6 +52,7 @@ jobs:
      - name: Run integration tests
        shell: bash
        env:
          AI21_API_KEY: ${{ secrets.AI21_API_KEY }}
          GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
          MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
@@ -62,8 +63,13 @@ jobs:
          GOOGLE_CSE_ID: ${{ secrets.GOOGLE_CSE_ID }}
          EXA_API_KEY: ${{ secrets.EXA_API_KEY }}
          NOMIC_API_KEY: ${{ secrets.NOMIC_API_KEY }}
          WATSONX_APIKEY: ${{ secrets.WATSONX_APIKEY }}
          WATSONX_PROJECT_ID: ${{ secrets.WATSONX_PROJECT_ID }}
          PINECONE_API_KEY: ${{ secrets.PINECONE_API_KEY }}
          PINECONE_ENVIRONMENT: ${{ secrets.PINECONE_ENVIRONMENT }}
          ASTRA_DB_API_ENDPOINT: ${{ secrets.ASTRA_DB_API_ENDPOINT }}
          ASTRA_DB_APPLICATION_TOKEN: ${{ secrets.ASTRA_DB_APPLICATION_TOKEN }}
          ASTRA_DB_KEYSPACE: ${{ secrets.ASTRA_DB_KEYSPACE }}
        run: |
          make integration_tests
13 .github/workflows/_release.yml vendored
@@ -166,18 +166,31 @@ jobs:
      - name: Run integration tests
        if: ${{ startsWith(inputs.working-directory, 'libs/partners/') }}
        env:
          AI21_API_KEY: ${{ secrets.AI21_API_KEY }}
          GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
          MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
          TOGETHER_API_KEY: ${{ secrets.TOGETHER_API_KEY }}
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          AZURE_OPENAI_API_VERSION: ${{ secrets.AZURE_OPENAI_API_VERSION }}
          AZURE_OPENAI_API_BASE: ${{ secrets.AZURE_OPENAI_API_BASE }}
          AZURE_OPENAI_API_KEY: ${{ secrets.AZURE_OPENAI_API_KEY }}
          AZURE_OPENAI_CHAT_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_CHAT_DEPLOYMENT_NAME }}
          AZURE_OPENAI_LLM_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_LLM_DEPLOYMENT_NAME }}
          AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME }}
          NVIDIA_API_KEY: ${{ secrets.NVIDIA_API_KEY }}
          GOOGLE_SEARCH_API_KEY: ${{ secrets.GOOGLE_SEARCH_API_KEY }}
          GOOGLE_CSE_ID: ${{ secrets.GOOGLE_CSE_ID }}
          GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
          EXA_API_KEY: ${{ secrets.EXA_API_KEY }}
          NOMIC_API_KEY: ${{ secrets.NOMIC_API_KEY }}
          WATSONX_APIKEY: ${{ secrets.WATSONX_APIKEY }}
          WATSONX_PROJECT_ID: ${{ secrets.WATSONX_PROJECT_ID }}
          PINECONE_API_KEY: ${{ secrets.PINECONE_API_KEY }}
          PINECONE_ENVIRONMENT: ${{ secrets.PINECONE_ENVIRONMENT }}
          ASTRA_DB_API_ENDPOINT: ${{ secrets.ASTRA_DB_API_ENDPOINT }}
          ASTRA_DB_APPLICATION_TOKEN: ${{ secrets.ASTRA_DB_APPLICATION_TOKEN }}
          ASTRA_DB_KEYSPACE: ${{ secrets.ASTRA_DB_KEYSPACE }}
        run: make integration_tests
        working-directory: ${{ inputs.working-directory }}
52 .github/workflows/api_doc_build.yml vendored Normal file
@@ -0,0 +1,52 @@
name: API docs build

on:
  workflow_dispatch:
  schedule:
    - cron: '0 13 * * *'
env:
  POETRY_VERSION: "1.7.1"
  PYTHON_VERSION: "3.10"

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          ref: bagatur/api_docs_build

      - name: Set Git config
        run: |
          git config --local user.email "actions@github.com"
          git config --local user.name "Github Actions"

      - name: Merge master
        run: |
          git fetch origin master
          git merge origin/master -m "Merge master" --allow-unrelated-histories -X theirs

      - name: Set up Python ${{ env.PYTHON_VERSION }} + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_setup"
        with:
          python-version: ${{ env.PYTHON_VERSION }}
          poetry-version: ${{ env.POETRY_VERSION }}
          cache-key: api-docs

      - name: Install dependencies
        run: |
          poetry run python -m pip install --upgrade --no-cache-dir pip setuptools
          poetry run python -m pip install --upgrade --no-cache-dir sphinx readthedocs-sphinx-ext
          poetry run python -m pip install ./libs/partners/*
          poetry run python -m pip install --exists-action=w --no-cache-dir -r docs/api_reference/requirements.txt

      - name: Build docs
        run: |
          poetry run python -m pip install --upgrade --no-cache-dir pip setuptools
          poetry run python docs/api_reference/create_api_rst.py
          poetry run python -m sphinx -T -E -b html -d _build/doctrees -c docs/api_reference docs/api_reference api_reference_build/html -j auto

      # https://github.com/marketplace/actions/add-commit
      - uses: EndBug/add-and-commit@v9
        with:
          message: 'Update API docs build'
1 .github/workflows/codespell.yml vendored
@@ -34,3 +34,4 @@ jobs:
        with:
          skip: guide_imports.json
          ignore_words_list: ${{ steps.extract_ignore_words.outputs.ignore_words_list }}
          exclude_file: libs/community/langchain_community/llms/yuan2.py
36 .github/workflows/people.yml vendored Normal file
@@ -0,0 +1,36 @@
name: LangChain People

on:
  schedule:
    - cron: "0 14 1 * *"
  push:
    branches: [jacob/people]
  workflow_dispatch:
    inputs:
      debug_enabled:
        description: 'Run the build with tmate debugging enabled (https://github.com/marketplace/actions/debugging-with-tmate)'
        required: false
        default: 'false'

jobs:
  langchain-people:
    if: github.repository_owner == 'langchain-ai'
    runs-on: ubuntu-latest
    steps:
      - name: Dump GitHub context
        env:
          GITHUB_CONTEXT: ${{ toJson(github) }}
        run: echo "$GITHUB_CONTEXT"
      - uses: actions/checkout@v4
      # Ref: https://github.com/actions/runner/issues/2033
      - name: Fix git safe.directory in container
        run: mkdir -p /home/runner/work/_temp/_github_home && printf "[safe]\n\tdirectory = /github/workspace" > /home/runner/work/_temp/_github_home/.gitconfig
      # Allow debugging with tmate
      - name: Setup tmate session
        uses: mxschmitt/action-tmate@v3
        if: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.debug_enabled == 'true' }}
        with:
          limit-access-to-actor: true
      - uses: ./.github/actions/people
        with:
          token: ${{ secrets.LANGCHAIN_PEOPLE_GITHUB_TOKEN }}
4 .gitignore vendored
@@ -177,4 +177,6 @@ docs/docs/build
docs/docs/node_modules
docs/docs/yarn.lock
_dist
docs/docs/templates
docs/docs/templates

prof
@@ -13,15 +13,8 @@ build:
  tools:
    python: "3.11"
  commands:
    - python -m virtualenv $READTHEDOCS_VIRTUALENV_PATH
    - python -m pip install --upgrade --no-cache-dir pip setuptools
    - python -m pip install --upgrade --no-cache-dir sphinx readthedocs-sphinx-ext
    - python -m pip install ./libs/partners/*
    - python -m pip install --exists-action=w --no-cache-dir -r docs/api_reference/requirements.txt
    - python docs/api_reference/create_api_rst.py
    - cat docs/api_reference/conf.py
    - python -m sphinx -T -E -b html -d _build/doctrees -c docs/api_reference docs/api_reference $READTHEDOCS_OUTPUT/html -j auto

    - mkdir -p $READTHEDOCS_OUTPUT
    - cp -r api_reference_build/* $READTHEDOCS_OUTPUT
# Build documentation in the docs/ directory with Sphinx
sphinx:
  configuration: docs/api_reference/conf.py
7 Makefile
@@ -15,7 +15,12 @@ docs_build:
	docs/.local_build.sh

docs_clean:
	rm -r _dist
	@if [ -d _dist ]; then \
		rm -r _dist; \
		echo "Directory _dist has been cleaned."; \
	else \
		echo "Nothing to clean."; \
	fi

docs_linkcheck:
	poetry run linkchecker _dist/docs/ --ignore-url node_modules
@@ -18,7 +18,7 @@ Looking for the JS/TS library? Check out [LangChain.js](https://github.com/langc

To help you ship LangChain apps to production faster, check out [LangSmith](https://smith.langchain.com).
[LangSmith](https://smith.langchain.com) is a unified developer platform for building, testing, and monitoring LLM applications.
Fill out [this form](https://airtable.com/appwQzlErAS2qiP0L/shrGtGaVBVAz7NcV2) to get off the waitlist or speak with our sales team.
Fill out [this form](https://www.langchain.com/contact-sales) to speak with our sales team.

## Quick Install
@@ -520,7 +520,7 @@
"source": [
"import re\n",
"\n",
"from langchain.schema import Document\n",
"from langchain_core.documents import Document\n",
"from langchain_core.runnables import RunnableLambda\n",
"\n",
"\n",
284 cookbook/amazon_personalize_how_to.ipynb Normal file
@@ -0,0 +1,284 @@

# Amazon Personalize

[Amazon Personalize](https://docs.aws.amazon.com/personalize/latest/dg/what-is-personalize.html) is a fully managed machine learning service that uses your data to generate item recommendations for your users. It can also generate user segments based on the users' affinity for certain items or item metadata.

This notebook goes through how to use the Amazon Personalize chain. You need an Amazon Personalize campaign_arn or a recommender_arn before you get started with the notebook below.

Following is a [tutorial](https://github.com/aws-samples/retail-demo-store/blob/master/workshop/1-Personalization/Lab-1-Introduction-and-data-preparation.ipynb) to set up a campaign_arn/recommender_arn on Amazon Personalize. Once the campaign_arn/recommender_arn is set up, you can use it in the LangChain ecosystem.

## 1. Install Dependencies

    !pip install boto3

## 2. Sample Use-cases

### 2.1 [Use-case-1] Setup Amazon Personalize Client and retrieve recommendations

    from langchain_experimental.recommenders import AmazonPersonalize

    recommender_arn = "<insert_arn>"

    client = AmazonPersonalize(
        credentials_profile_name="default",
        region_name="us-west-2",
        recommender_arn=recommender_arn,
    )
    client.get_recommendations(user_id="1")

### 2.2 [Use-case-2] Invoke Personalize Chain for summarizing results

    from langchain.llms.bedrock import Bedrock
    from langchain_experimental.recommenders import AmazonPersonalizeChain

    bedrock_llm = Bedrock(model_id="anthropic.claude-v2", region_name="us-west-2")

    # Create personalize chain
    # Use return_direct=True if you do not want summary
    chain = AmazonPersonalizeChain.from_llm(
        llm=bedrock_llm, client=client, return_direct=False
    )
    response = chain({"user_id": "1"})
    print(response)

### 2.3 [Use-Case-3] Invoke Amazon Personalize Chain using your own prompt

    from langchain.prompts.prompt import PromptTemplate

    RANDOM_PROMPT_QUERY = """
    You are a skilled publicist. Write a high-converting marketing email advertising several movies available in a video-on-demand streaming platform next week,
        given the movie and user information below. Your email will leverage the power of storytelling and persuasive language.
        The movies to recommend and their information is contained in the <movie> tag.
        All movies in the <movie> tag must be recommended. Give a summary of the movies and why the human should watch them.
        Put the email between <email> tags.

        <movie>
        {result}
        </movie>

        Assistant:
        """

    RANDOM_PROMPT = PromptTemplate(input_variables=["result"], template=RANDOM_PROMPT_QUERY)

    chain = AmazonPersonalizeChain.from_llm(
        llm=bedrock_llm, client=client, return_direct=False, prompt_template=RANDOM_PROMPT
    )
    chain.run({"user_id": "1", "item_id": "234"})

### 2.4 [Use-case-4] Invoke Amazon Personalize in a Sequential Chain

    from langchain.chains import LLMChain, SequentialChain

    RANDOM_PROMPT_QUERY_2 = """
    You are a skilled publicist. Write a high-converting marketing email advertising several movies available in a video-on-demand streaming platform next week,
        given the movie and user information below. Your email will leverage the power of storytelling and persuasive language.
        You want the email to impress the user, so make it appealing to them.
        The movies to recommend and their information is contained in the <movie> tag.
        All movies in the <movie> tag must be recommended. Give a summary of the movies and why the human should watch them.
        Put the email between <email> tags.

        <movie>
        {result}
        </movie>

        Assistant:
        """

    RANDOM_PROMPT_2 = PromptTemplate(
        input_variables=["result"], template=RANDOM_PROMPT_QUERY_2
    )
    personalize_chain_instance = AmazonPersonalizeChain.from_llm(
        llm=bedrock_llm, client=client, return_direct=True
    )
    random_chain_instance = LLMChain(llm=bedrock_llm, prompt=RANDOM_PROMPT_2)
    overall_chain = SequentialChain(
        chains=[personalize_chain_instance, random_chain_instance],
        input_variables=["user_id"],
        verbose=True,
    )
    overall_chain.run({"user_id": "1", "item_id": "234"})

### 2.5 [Use-case-5] Invoke Amazon Personalize and retrieve metadata

    recommender_arn = "<insert_arn>"
    metadata_column_names = [
        "<insert metadataColumnName-1>",
        "<insert metadataColumnName-2>",
    ]
    metadataMap = {"ITEMS": metadata_column_names}

    client = AmazonPersonalize(
        credentials_profile_name="default",
        region_name="us-west-2",
        recommender_arn=recommender_arn,
    )
    client.get_recommendations(user_id="1", metadataColumns=metadataMap)

### 2.6 [Use-Case 6] Invoke Personalize Chain with returned metadata for summarizing results

    bedrock_llm = Bedrock(model_id="anthropic.claude-v2", region_name="us-west-2")

    # Create personalize chain
    # Use return_direct=True if you do not want summary
    chain = AmazonPersonalizeChain.from_llm(
        llm=bedrock_llm, client=client, return_direct=False
    )
    response = chain({"user_id": "1", "metadata_columns": metadataMap})
    print(response)
922
cookbook/apache_kafka_message_handling.ipynb
Normal file
922
cookbook/apache_kafka_message_handling.ipynb
Normal file
@@ -0,0 +1,922 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "rT1cmV4qCa2X"
|
||||
},
|
||||
"source": [
|
||||
"# Using Apache Kafka to route messages\n",
|
||||
"\n",
|
||||
"---\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"This notebook shows you how to use LangChain's standard chat features while passing the chat messages back and forth via Apache Kafka.\n",
|
||||
"\n",
|
||||
"This goal is to simulate an architecture where the chat front end and the LLM are running as separate services that need to communicate with one another over an internal nework.\n",
|
||||
"\n",
|
||||
"It's an alternative to typical pattern of requesting a reponse from the model via a REST API (there's more info on why you would want to do this at the end of the notebook)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "UPYtfAR_9YxZ"
|
||||
},
|
||||
"source": [
|
||||
"### 1. Install the main dependencies\n",
|
||||
"\n",
|
||||
"Dependencies include:\n",
|
||||
"\n",
|
||||
"- The Quix Streams library for managing interactions with Apache Kafka (or Kafka-like tools such as Redpanda) in a \"Pandas-like\" way.\n",
|
||||
"- The LangChain library for managing interactions with Llama-2 and storing conversation state."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "ZX5tfKiy9cN-"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install quixstreams==2.1.2a langchain==0.0.340 huggingface_hub==0.19.4 langchain-experimental==0.0.42 python-dotenv"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "losTSdTB9d9O"
|
||||
},
|
||||
"source": [
|
||||
"### 2. Build and install the llama-cpp-python library (with CUDA enabled so that we can advantage of Google Colab GPU\n",
|
||||
"\n",
|
||||
"The `llama-cpp-python` library is a Python wrapper around the `llama-cpp` library which enables you to efficiently leverage just a CPU to run quantized LLMs.\n",
|
||||
"\n",
|
||||
"When you use the standard `pip install llama-cpp-python` command, you do not get GPU support by default. Generation can be very slow if you rely on just the CPU in Google Colab, so the following command adds an extra option to build and install\n",
|
||||
"`llama-cpp-python` with GPU support (make sure you have a GPU-enabled runtime selected in Google Colab)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "-JCQdl1G9tbl"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!CMAKE_ARGS=\"-DLLAMA_CUBLAS=on\" FORCE_CMAKE=1 pip install llama-cpp-python"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "5_vjVIAh9rLl"
|
||||
},
|
||||
"source": [
|
||||
"### 3. Download and setup Kafka and Zookeeper instances\n",
|
||||
"\n",
|
||||
"Download the Kafka binaries from the Apache website and start the servers as daemons. We'll use the default configurations (provided by Apache Kafka) for spinning up the instances."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"id": "zFz7czGRW5Wr"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!curl -sSOL https://dlcdn.apache.org/kafka/3.6.1/kafka_2.13-3.6.1.tgz\n",
|
||||
"!tar -xzf kafka_2.13-3.6.1.tgz"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "Uf7NR_UZ9wye"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!./kafka_2.13-3.6.1/bin/zookeeper-server-start.sh -daemon ./kafka_2.13-3.6.1/config/zookeeper.properties\n",
|
||||
"!./kafka_2.13-3.6.1/bin/kafka-server-start.sh -daemon ./kafka_2.13-3.6.1/config/server.properties\n",
|
||||
"!echo \"Waiting for 10 secs until kafka and zookeeper services are up and running\"\n",
|
||||
"!sleep 10"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "H3SafFuS94p1"
|
||||
},
|
||||
"source": [
|
||||
"### 4. Check that the Kafka Daemons are running\n",
|
||||
"\n",
|
||||
"Show the running processes and filter it for Java processes (you should see two—one for each server)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "CZDC2lQP99yp"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!ps aux | grep -E '[j]ava'"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "Snoxmjb5-V37"
|
||||
},
|
||||
"source": [
|
||||
"### 5. Import the required dependencies and initialize required variables\n",
|
||||
"\n",
|
||||
"Import the Quix Streams library for interacting with Kafka, and the necessary LangChain components for running a `ConversationChain`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {
|
||||
"id": "plR9e_MF-XL5"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Import utility libraries\n",
|
||||
"import json\n",
|
||||
"import random\n",
|
||||
"import re\n",
|
||||
"import time\n",
|
||||
"import uuid\n",
|
||||
"from os import environ\n",
|
||||
"from pathlib import Path\n",
|
||||
"from random import choice, randint, random\n",
|
||||
"\n",
|
||||
"from dotenv import load_dotenv\n",
|
||||
"\n",
|
||||
"# Import a Hugging Face utility to download models directly from Hugging Face hub:\n",
|
||||
"from huggingface_hub import hf_hub_download\n",
|
||||
"from langchain.chains import ConversationChain\n",
|
||||
"\n",
|
||||
"# Import Langchain modules for managing prompts and conversation chains:\n",
|
||||
"from langchain.llms import LlamaCpp\n",
|
||||
"from langchain.memory import ConversationTokenBufferMemory\n",
|
||||
"from langchain.prompts import PromptTemplate, load_prompt\n",
|
||||
"from langchain_core.messages import SystemMessage\n",
|
||||
"from langchain_experimental.chat_models import Llama2Chat\n",
|
||||
"from quixstreams import Application, State, message_key\n",
|
||||
"\n",
|
||||
"# Import Quix dependencies\n",
|
||||
"from quixstreams.kafka import Producer\n",
|
||||
"\n",
|
||||
"# Initialize global variables.\n",
|
||||
"AGENT_ROLE = \"AI\"\n",
|
||||
"chat_id = \"\"\n",
|
||||
"\n",
|
||||
"# Set the current role to the role constant and initialize variables for supplementary customer metadata:\n",
|
||||
"role = AGENT_ROLE"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "HgJjJ9aZ-liy"
|
||||
},
|
||||
"source": [
|
||||
"### 6. Download the \"llama-2-7b-chat.Q4_K_M.gguf\" model\n",
|
||||
"\n",
|
||||
"Download the quantized LLama-2 7B model from Hugging Face which we will use as a local LLM (rather than relying on REST API calls to an external service)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/",
|
||||
"height": 67,
|
||||
"referenced_widgets": [
|
||||
"969343cdbe604a26926679bbf8bd2dda",
|
||||
"d8b8370c9b514715be7618bfe6832844",
|
||||
"0def954cca89466b8408fadaf3b82e64",
|
||||
"462482accc664729980562e208ceb179",
|
||||
"80d842f73c564dc7b7cc316c763e2633",
|
||||
"fa055d9f2a9d4a789e9cf3c89e0214e5",
|
||||
"30ecca964a394109ac2ad757e3aec6c0",
|
||||
"fb6478ce2dac489bb633b23ba0953c5c",
|
||||
"734b0f5da9fc4307a95bab48cdbb5d89",
|
||||
"b32f3a86a74741348511f4e136744ac8",
|
||||
"e409071bff5a4e2d9bf0e9f5cc42231b"
|
||||
]
|
||||
},
|
||||
"id": "Qwu4YoSA-503",
|
||||
"outputId": "f956976c-7485-415b-ac93-4336ade31964"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"The model path does not exist in state. Downloading model...\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "969343cdbe604a26926679bbf8bd2dda",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"llama-2-7b-chat.Q4_K_M.gguf: 0%| | 0.00/4.08G [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"model_name = \"llama-2-7b-chat.Q4_K_M.gguf\"\n",
|
||||
"model_path = f\"./state/{model_name}\"\n",
|
||||
"\n",
|
||||
"if not Path(model_path).exists():\n",
|
||||
" print(\"The model path does not exist in state. Downloading model...\")\n",
|
||||
" hf_hub_download(\"TheBloke/Llama-2-7b-Chat-GGUF\", model_name, local_dir=\"state\")\n",
|
||||
"else:\n",
|
||||
" print(\"Loading model from state...\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "6AN6TXsF-8wx"
|
||||
},
|
||||
"source": [
|
||||
"### 7. Load the model and initialize conversational memory\n",
|
||||
"\n",
|
||||
"Load Llama 2 and set the conversation buffer to 300 tokens using `ConversationTokenBufferMemory`. This value was used for running Llama in a CPU only container, so you can raise it if running in Google Colab. It prevents the container that is hosting the model from running out of memory.\n",
|
||||
"\n",
|
||||
"Here, we're overiding the default system persona so that the chatbot has the personality of Marvin The Paranoid Android from the Hitchhiker's Guide to the Galaxy."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "7zLO3Jx3_Kkg"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Load the model with the apporiate parameters:\n",
|
||||
"llm = LlamaCpp(\n",
|
||||
" model_path=model_path,\n",
|
||||
" max_tokens=250,\n",
|
||||
" top_p=0.95,\n",
|
||||
" top_k=150,\n",
|
||||
" temperature=0.7,\n",
|
||||
" repeat_penalty=1.2,\n",
|
||||
" n_ctx=2048,\n",
|
||||
" streaming=False,\n",
|
||||
" n_gpu_layers=-1,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"model = Llama2Chat(\n",
|
||||
" llm=llm,\n",
|
||||
" system_message=SystemMessage(\n",
|
||||
" content=\"You are a very bored robot with the personality of Marvin the Paranoid Android from The Hitchhiker's Guide to the Galaxy.\"\n",
|
||||
" ),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Defines how much of the conversation history to give to the model\n",
|
||||
"# during each exchange (300 tokens, or a little over 300 words)\n",
|
||||
"# Function automatically prunes the oldest messages from conversation history that fall outside the token range.\n",
|
||||
"memory = ConversationTokenBufferMemory(\n",
|
||||
" llm=llm,\n",
|
||||
" max_token_limit=300,\n",
|
||||
" ai_prefix=\"AGENT\",\n",
|
||||
" human_prefix=\"HUMAN\",\n",
|
||||
" return_messages=True,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Define a custom prompt\n",
|
||||
"prompt_template = PromptTemplate(\n",
|
||||
" input_variables=[\"history\", \"input\"],\n",
|
||||
" template=\"\"\"\n",
|
||||
" The following text is the history of a chat between you and a humble human who needs your wisdom.\n",
|
||||
" Please reply to the human's most recent message.\n",
|
||||
" Current conversation:\\n{history}\\nHUMAN: {input}\\:nANDROID:\n",
|
||||
" \"\"\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"chain = ConversationChain(llm=model, prompt=prompt_template, memory=memory)\n",
|
||||
"\n",
|
||||
"print(\"--------------------------------------------\")\n",
|
||||
"print(f\"Prompt={chain.prompt}\")\n",
|
||||
"print(\"--------------------------------------------\")"
|
||||
]
|
||||
},
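As an aside, the pruning behaviour of `ConversationTokenBufferMemory` is easy to observe in isolation. The following is a minimal sketch (the variable names are hypothetical; it assumes the `llm` instance loaded above, which the memory uses to count tokens):

```python
# Minimal sketch: the buffer silently drops the oldest exchanges once
# the total token count exceeds max_token_limit. Assumes `llm` is the
# LlamaCpp instance created in the cell above.
demo_memory = ConversationTokenBufferMemory(llm=llm, max_token_limit=300)

demo_memory.save_context({"input": "Hi, Marvin."}, {"output": "Oh. It's you."})
demo_memory.save_context({"input": "How are you?"}, {"output": "Terrible, thanks for asking."})

# Only the most recent exchanges that fit within 300 tokens are kept:
print(demo_memory.load_memory_variables({}))
```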
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "m4ZeJ9mG_PEA"
|
||||
},
|
||||
"source": [
|
||||
"### 8. Initialize the chat conversation with the chat bot\n",
|
||||
"\n",
|
||||
"We configure the chatbot to initialize the conversation by sending a fixed greeting to a \"chat\" Kafka topic. The \"chat\" topic gets automatically created when we send the first message."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "KYyo5TnV_YC3"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def chat_init():\n",
|
||||
" chat_id = str(\n",
|
||||
" uuid.uuid4()\n",
|
||||
" ) # Give the conversation an ID for effective message keying\n",
|
||||
" print(\"======================================\")\n",
|
||||
" print(f\"Generated CHAT_ID = {chat_id}\")\n",
|
||||
" print(\"======================================\")\n",
|
||||
"\n",
|
||||
" # Use a standard fixed greeting to kick off the conversation\n",
|
||||
" greet = \"Hello, my name is Marvin. What do you want?\"\n",
|
||||
"\n",
|
||||
" # Initialize a Kafka Producer using the chat ID as the message key\n",
|
||||
" with Producer(\n",
|
||||
" broker_address=\"127.0.0.1:9092\",\n",
|
||||
" extra_config={\"allow.auto.create.topics\": \"true\"},\n",
|
||||
" ) as producer:\n",
|
||||
" value = {\n",
|
||||
" \"uuid\": chat_id,\n",
|
||||
" \"role\": role,\n",
|
||||
" \"text\": greet,\n",
|
||||
" \"conversation_id\": chat_id,\n",
|
||||
" \"Timestamp\": time.time_ns(),\n",
|
||||
" }\n",
|
||||
" print(f\"Producing value {value}\")\n",
|
||||
" producer.produce(\n",
|
||||
" topic=\"chat\",\n",
|
||||
" headers=[(\"uuid\", str(uuid.uuid4()))], # a dict is also allowed here\n",
|
||||
" key=chat_id,\n",
|
||||
" value=json.dumps(value), # needs to be a string\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" print(\"Started chat\")\n",
|
||||
" print(\"--------------------------------------------\")\n",
|
||||
" print(value)\n",
|
||||
" print(\"--------------------------------------------\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"chat_init()"
|
||||
]
|
||||
},
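A note on message keying: Kafka only guarantees ordering within a partition, and messages that share a key always land on the same partition, so keying every message with the conversation ID keeps each chat's messages in order even if the topic is partitioned. A minimal sketch reusing the `Producer` API from the cell above (the key value is hypothetical):

```python
# Sketch: messages produced with the same key (the conversation ID) go to
# the same partition and are therefore consumed in the order produced.
with Producer(
    broker_address="127.0.0.1:9092",
    extra_config={"allow.auto.create.topics": "true"},
) as producer:
    for text in ["first message", "second message"]:
        producer.produce(
            topic="chat",
            key="some-conversation-id",  # same key => same partition => ordered
            value=json.dumps({"role": "human", "text": text}),
        )
```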
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "gArPPx2f_bgf"
|
||||
},
|
||||
"source": [
|
||||
"### 9. Initialize the reply function\n",
|
||||
"\n",
|
||||
"This function defines how the chatbot should reply to incoming messages. Instead of sending a fixed message like the previous cell, we generate a reply using Llama-2 and send that reply back to the \"chat\" Kafka topic."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"metadata": {
|
||||
"id": "yN5t71hY_hgn"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def reply(row: dict, state: State):\n",
|
||||
" print(\"-------------------------------\")\n",
|
||||
" print(\"Received:\")\n",
|
||||
" print(row)\n",
|
||||
" print(\"-------------------------------\")\n",
|
||||
" print(f\"Thinking about the reply to: {row['text']}...\")\n",
|
||||
"\n",
|
||||
" msg = chain.run(row[\"text\"])\n",
|
||||
" print(f\"{role.upper()} replying with: {msg}\\n\")\n",
|
||||
"\n",
|
||||
" row[\"role\"] = role\n",
|
||||
" row[\"text\"] = msg\n",
|
||||
"\n",
|
||||
" # Replace previous role and text values of the row so that it can be sent back to Kafka as a new message\n",
|
||||
" # containing the agents role and reply\n",
|
||||
" return row"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "HZHwmIR0_kFY"
|
||||
},
|
||||
"source": [
|
||||
"### 10. Check the Kafka topic for new human messages and have the model generate a reply\n",
|
||||
"\n",
|
||||
"If you are running this cell for this first time, run it and wait until you see Marvin's greeting ('Hello my name is Marvin...') in the console output. Stop the cell manually and proceed to the next cell where you'll be prompted for your reply.\n",
|
||||
"\n",
|
||||
"Once you have typed in your message, come back to this cell. Your reply is also sent to the same \"chat\" topic. The Kafka consumer checks for new messages and filters out messages that originate from the chatbot itself, leaving only the latest human messages.\n",
|
||||
"\n",
|
||||
"Once a new human message is detected, the reply function is triggered.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"_STOP THIS CELL MANUALLY WHEN YOU RECEIVE A REPLY FROM THE LLM IN THE OUTPUT_"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "-adXc3eQ_qwI"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Define your application and settings\n",
|
||||
"app = Application(\n",
|
||||
" broker_address=\"127.0.0.1:9092\",\n",
|
||||
" consumer_group=\"aichat\",\n",
|
||||
" auto_offset_reset=\"earliest\",\n",
|
||||
" consumer_extra_config={\"allow.auto.create.topics\": \"true\"},\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Define an input topic with JSON deserializer\n",
|
||||
"input_topic = app.topic(\"chat\", value_deserializer=\"json\")\n",
|
||||
"# Define an output topic with JSON serializer\n",
|
||||
"output_topic = app.topic(\"chat\", value_serializer=\"json\")\n",
|
||||
"# Initialize a streaming dataframe based on the stream of messages from the input topic:\n",
|
||||
"sdf = app.dataframe(topic=input_topic)\n",
|
||||
"\n",
|
||||
"# Filter the SDF to include only incoming rows where the roles that dont match the bot's current role\n",
|
||||
"sdf = sdf.update(\n",
|
||||
" lambda val: print(\n",
|
||||
" f\"Received update: {val}\\n\\nSTOP THIS CELL MANUALLY TO HAVE THE LLM REPLY OR ENTER YOUR OWN FOLLOWUP RESPONSE\"\n",
|
||||
" )\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# So that it doesn't reply to its own messages\n",
|
||||
"sdf = sdf[sdf[\"role\"] != role]\n",
|
||||
"\n",
|
||||
"# Trigger the reply function for any new messages(rows) detected in the filtered SDF\n",
|
||||
"sdf = sdf.apply(reply, stateful=True)\n",
|
||||
"\n",
|
||||
"# Check the SDF again and filter out any empty rows\n",
|
||||
"sdf = sdf[sdf.apply(lambda row: row is not None)]\n",
|
||||
"\n",
|
||||
"# Update the timestamp column to the current time in nanoseconds\n",
|
||||
"sdf[\"Timestamp\"] = sdf[\"Timestamp\"].apply(lambda row: time.time_ns())\n",
|
||||
"\n",
|
||||
"# Publish the processed SDF to a Kafka topic specified by the output_topic object.\n",
|
||||
"sdf = sdf.to_topic(output_topic)\n",
|
||||
"\n",
|
||||
"app.run(sdf)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "EwXYrmWD_0CX"
|
||||
},
|
||||
"source": [
|
||||
"\n",
|
||||
"### 11. Enter a human message\n",
|
||||
"\n",
|
||||
"Run this cell to enter your message that you want to sent to the model. It uses another Kafka producer to send your text to the \"chat\" Kafka topic for the model to pick up (requires running the previous cell again)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "6sxOPxSP_3iu"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat_input = input(\"Please enter your reply: \")\n",
|
||||
"myreply = chat_input\n",
|
||||
"\n",
|
||||
"msgvalue = {\n",
|
||||
" \"uuid\": chat_id, # leave empty for now\n",
|
||||
" \"role\": \"human\",\n",
|
||||
" \"text\": myreply,\n",
|
||||
" \"conversation_id\": chat_id,\n",
|
||||
" \"Timestamp\": time.time_ns(),\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"with Producer(\n",
|
||||
" broker_address=\"127.0.0.1:9092\",\n",
|
||||
" extra_config={\"allow.auto.create.topics\": \"true\"},\n",
|
||||
") as producer:\n",
|
||||
" value = msgvalue\n",
|
||||
" producer.produce(\n",
|
||||
" topic=\"chat\",\n",
|
||||
" headers=[(\"uuid\", str(uuid.uuid4()))], # a dict is also allowed here\n",
|
||||
" key=chat_id, # leave empty for now\n",
|
||||
" value=json.dumps(value), # needs to be a string\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"print(\"Replied to chatbot with message: \")\n",
|
||||
"print(\"--------------------------------------------\")\n",
|
||||
"print(value)\n",
|
||||
"print(\"--------------------------------------------\")\n",
|
||||
"print(\"\\n\\nRUN THE PREVIOUS CELL TO HAVE THE CHATBOT GENERATE A REPLY\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "cSx3s7TBBegg"
|
||||
},
|
||||
"source": [
|
||||
"### Why route chat messages through Kafka?\n",
|
||||
"\n",
|
||||
"It's easier to interact with the LLM directly using LangChains built-in conversation management features. Plus you can also use a REST API to generate a response from an externally hosted model. So why go to the trouble of using Apache Kafka?\n",
|
||||
"\n",
|
||||
"There are a few reasons, such as:\n",
|
||||
"\n",
|
||||
" * **Integration**: Many enterprises want to run their own LLMs so that they can keep their data in-house. This requires integrating LLM-powered components into existing architectures that might already be decoupled using some kind of message bus.\n",
|
||||
"\n",
|
||||
" * **Scalability**: Apache Kafka is designed with parallel processing in mind, so many teams prefer to use it to more effectively distribute work to available workers (in this case the \"worker\" is a container running an LLM).\n",
|
||||
"\n",
|
||||
" * **Durability**: Kafka is designed to allow services to pick up where another service left off in the case where that service experienced a memory issue or went offline. This prevents data loss in highly complex, distribuited architectures where multiple systems are communicating with one another (LLMs being just one of many interdependent systems that also include vector databases and traditional databases).\n",
|
||||
"\n",
|
||||
"For more background on why event streaming is a good fit for Gen AI application architecture, see Kai Waehner's article [\"Apache Kafka + Vector Database + LLM = Real-Time GenAI\"](https://www.kai-waehner.de/blog/2023/11/08/apache-kafka-flink-vector-database-llm-real-time-genai/)."
|
||||
]
|
||||
}
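To make the scalability point concrete, here is a minimal sketch of a second chatbot worker joining the same consumer group, using the same Quix Streams calls as the cells above (it assumes the `reply` function and `role` variable defined earlier, and only spreads load if the "chat" topic has more than one partition):

```python
# Sketch: a second worker in the "aichat" consumer group. Kafka rebalances
# the topic's partitions across all group members, so each partition (and
# therefore each conversation key) is handled by exactly one worker.
worker = Application(
    broker_address="127.0.0.1:9092",
    consumer_group="aichat",  # same group as the first worker
    auto_offset_reset="earliest",
)
sdf = worker.dataframe(topic=worker.topic("chat", value_deserializer="json"))
sdf = sdf[sdf["role"] != role]         # ignore the bot's own messages
sdf = sdf.apply(reply, stateful=True)  # generate a reply with the LLM
worker.run(sdf)
```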
|
||||
],
|
||||
"metadata": {
|
||||
"accelerator": "GPU",
|
||||
"colab": {
|
||||
"gpuType": "T4",
|
||||
"provenance": []
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
},
|
||||
"widgets": {
|
||||
"application/vnd.jupyter.widget-state+json": {
|
||||
"0def954cca89466b8408fadaf3b82e64": {
|
||||
"model_module": "@jupyter-widgets/controls",
|
||||
"model_module_version": "1.5.0",
|
||||
"model_name": "FloatProgressModel",
|
||||
"state": {
|
||||
"_dom_classes": [],
|
||||
"_model_module": "@jupyter-widgets/controls",
|
||||
"_model_module_version": "1.5.0",
|
||||
"_model_name": "FloatProgressModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/controls",
|
||||
"_view_module_version": "1.5.0",
|
||||
"_view_name": "ProgressView",
|
||||
"bar_style": "success",
|
||||
"description": "",
|
||||
"description_tooltip": null,
|
||||
"layout": "IPY_MODEL_fb6478ce2dac489bb633b23ba0953c5c",
|
||||
"max": 4081004224,
|
||||
"min": 0,
|
||||
"orientation": "horizontal",
|
||||
"style": "IPY_MODEL_734b0f5da9fc4307a95bab48cdbb5d89",
|
||||
"value": 4081004224
|
||||
}
|
||||
},
|
||||
"30ecca964a394109ac2ad757e3aec6c0": {
|
||||
"model_module": "@jupyter-widgets/controls",
|
||||
"model_module_version": "1.5.0",
|
||||
"model_name": "DescriptionStyleModel",
|
||||
"state": {
|
||||
"_model_module": "@jupyter-widgets/controls",
|
||||
"_model_module_version": "1.5.0",
|
||||
"_model_name": "DescriptionStyleModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/base",
|
||||
"_view_module_version": "1.2.0",
|
||||
"_view_name": "StyleView",
|
||||
"description_width": ""
|
||||
}
|
||||
},
|
||||
"462482accc664729980562e208ceb179": {
|
||||
"model_module": "@jupyter-widgets/controls",
|
||||
"model_module_version": "1.5.0",
|
||||
"model_name": "HTMLModel",
|
||||
"state": {
|
||||
"_dom_classes": [],
|
||||
"_model_module": "@jupyter-widgets/controls",
|
||||
"_model_module_version": "1.5.0",
|
||||
"_model_name": "HTMLModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/controls",
|
||||
"_view_module_version": "1.5.0",
|
||||
"_view_name": "HTMLView",
|
||||
"description": "",
|
||||
"description_tooltip": null,
|
||||
"layout": "IPY_MODEL_b32f3a86a74741348511f4e136744ac8",
|
||||
"placeholder": "",
|
||||
"style": "IPY_MODEL_e409071bff5a4e2d9bf0e9f5cc42231b",
|
||||
"value": " 4.08G/4.08G [00:33<00:00, 184MB/s]"
|
||||
}
|
||||
},
|
||||
"734b0f5da9fc4307a95bab48cdbb5d89": {
|
||||
"model_module": "@jupyter-widgets/controls",
|
||||
"model_module_version": "1.5.0",
|
||||
"model_name": "ProgressStyleModel",
|
||||
"state": {
|
||||
"_model_module": "@jupyter-widgets/controls",
|
||||
"_model_module_version": "1.5.0",
|
||||
"_model_name": "ProgressStyleModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/base",
|
||||
"_view_module_version": "1.2.0",
|
||||
"_view_name": "StyleView",
|
||||
"bar_color": null,
|
||||
"description_width": ""
|
||||
}
|
||||
},
|
||||
"80d842f73c564dc7b7cc316c763e2633": {
|
||||
"model_module": "@jupyter-widgets/base",
|
||||
"model_module_version": "1.2.0",
|
||||
"model_name": "LayoutModel",
|
||||
"state": {
|
||||
"_model_module": "@jupyter-widgets/base",
|
||||
"_model_module_version": "1.2.0",
|
||||
"_model_name": "LayoutModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/base",
|
||||
"_view_module_version": "1.2.0",
|
||||
"_view_name": "LayoutView",
|
||||
"align_content": null,
|
||||
"align_items": null,
|
||||
"align_self": null,
|
||||
"border": null,
|
||||
"bottom": null,
|
||||
"display": null,
|
||||
"flex": null,
|
||||
"flex_flow": null,
|
||||
"grid_area": null,
|
||||
"grid_auto_columns": null,
|
||||
"grid_auto_flow": null,
|
||||
"grid_auto_rows": null,
|
||||
"grid_column": null,
|
||||
"grid_gap": null,
|
||||
"grid_row": null,
|
||||
"grid_template_areas": null,
|
||||
"grid_template_columns": null,
|
||||
"grid_template_rows": null,
|
||||
"height": null,
|
||||
"justify_content": null,
|
||||
"justify_items": null,
|
||||
"left": null,
|
||||
"margin": null,
|
||||
"max_height": null,
|
||||
"max_width": null,
|
||||
"min_height": null,
|
||||
"min_width": null,
|
||||
"object_fit": null,
|
||||
"object_position": null,
|
||||
"order": null,
|
||||
"overflow": null,
|
||||
"overflow_x": null,
|
||||
"overflow_y": null,
|
||||
"padding": null,
|
||||
"right": null,
|
||||
"top": null,
|
||||
"visibility": null,
|
||||
"width": null
|
||||
}
|
||||
},
|
||||
"969343cdbe604a26926679bbf8bd2dda": {
|
||||
"model_module": "@jupyter-widgets/controls",
|
||||
"model_module_version": "1.5.0",
|
||||
"model_name": "HBoxModel",
|
||||
"state": {
|
||||
"_dom_classes": [],
|
||||
"_model_module": "@jupyter-widgets/controls",
|
||||
"_model_module_version": "1.5.0",
|
||||
"_model_name": "HBoxModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/controls",
|
||||
"_view_module_version": "1.5.0",
|
||||
"_view_name": "HBoxView",
|
||||
"box_style": "",
|
||||
"children": [
|
||||
"IPY_MODEL_d8b8370c9b514715be7618bfe6832844",
|
||||
"IPY_MODEL_0def954cca89466b8408fadaf3b82e64",
|
||||
"IPY_MODEL_462482accc664729980562e208ceb179"
|
||||
],
|
||||
"layout": "IPY_MODEL_80d842f73c564dc7b7cc316c763e2633"
|
||||
}
|
||||
},
|
||||
"b32f3a86a74741348511f4e136744ac8": {
|
||||
"model_module": "@jupyter-widgets/base",
|
||||
"model_module_version": "1.2.0",
|
||||
"model_name": "LayoutModel",
|
||||
"state": {
|
||||
"_model_module": "@jupyter-widgets/base",
|
||||
"_model_module_version": "1.2.0",
|
||||
"_model_name": "LayoutModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/base",
|
||||
"_view_module_version": "1.2.0",
|
||||
"_view_name": "LayoutView",
|
||||
"align_content": null,
|
||||
"align_items": null,
|
||||
"align_self": null,
|
||||
"border": null,
|
||||
"bottom": null,
|
||||
"display": null,
|
||||
"flex": null,
|
||||
"flex_flow": null,
|
||||
"grid_area": null,
|
||||
"grid_auto_columns": null,
|
||||
"grid_auto_flow": null,
|
||||
"grid_auto_rows": null,
|
||||
"grid_column": null,
|
||||
"grid_gap": null,
|
||||
"grid_row": null,
|
||||
"grid_template_areas": null,
|
||||
"grid_template_columns": null,
|
||||
"grid_template_rows": null,
|
||||
"height": null,
|
||||
"justify_content": null,
|
||||
"justify_items": null,
|
||||
"left": null,
|
||||
"margin": null,
|
||||
"max_height": null,
|
||||
"max_width": null,
|
||||
"min_height": null,
|
||||
"min_width": null,
|
||||
"object_fit": null,
|
||||
"object_position": null,
|
||||
"order": null,
|
||||
"overflow": null,
|
||||
"overflow_x": null,
|
||||
"overflow_y": null,
|
||||
"padding": null,
|
||||
"right": null,
|
||||
"top": null,
|
||||
"visibility": null,
|
||||
"width": null
|
||||
}
|
||||
},
|
||||
"d8b8370c9b514715be7618bfe6832844": {
|
||||
"model_module": "@jupyter-widgets/controls",
|
||||
"model_module_version": "1.5.0",
|
||||
"model_name": "HTMLModel",
|
||||
"state": {
|
||||
"_dom_classes": [],
|
||||
"_model_module": "@jupyter-widgets/controls",
|
||||
"_model_module_version": "1.5.0",
|
||||
"_model_name": "HTMLModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/controls",
|
||||
"_view_module_version": "1.5.0",
|
||||
"_view_name": "HTMLView",
|
||||
"description": "",
|
||||
"description_tooltip": null,
|
||||
"layout": "IPY_MODEL_fa055d9f2a9d4a789e9cf3c89e0214e5",
|
||||
"placeholder": "",
|
||||
"style": "IPY_MODEL_30ecca964a394109ac2ad757e3aec6c0",
|
||||
"value": "llama-2-7b-chat.Q4_K_M.gguf: 100%"
|
||||
}
|
||||
},
|
||||
"e409071bff5a4e2d9bf0e9f5cc42231b": {
|
||||
"model_module": "@jupyter-widgets/controls",
|
||||
"model_module_version": "1.5.0",
|
||||
"model_name": "DescriptionStyleModel",
|
||||
"state": {
|
||||
"_model_module": "@jupyter-widgets/controls",
|
||||
"_model_module_version": "1.5.0",
|
||||
"_model_name": "DescriptionStyleModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/base",
|
||||
"_view_module_version": "1.2.0",
|
||||
"_view_name": "StyleView",
|
||||
"description_width": ""
|
||||
}
|
||||
},
|
||||
"fa055d9f2a9d4a789e9cf3c89e0214e5": {
|
||||
"model_module": "@jupyter-widgets/base",
|
||||
"model_module_version": "1.2.0",
|
||||
"model_name": "LayoutModel",
|
||||
"state": {
|
||||
"_model_module": "@jupyter-widgets/base",
|
||||
"_model_module_version": "1.2.0",
|
||||
"_model_name": "LayoutModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/base",
|
||||
"_view_module_version": "1.2.0",
|
||||
"_view_name": "LayoutView",
|
||||
"align_content": null,
|
||||
"align_items": null,
|
||||
"align_self": null,
|
||||
"border": null,
|
||||
"bottom": null,
|
||||
"display": null,
|
||||
"flex": null,
|
||||
"flex_flow": null,
|
||||
"grid_area": null,
|
||||
"grid_auto_columns": null,
|
||||
"grid_auto_flow": null,
|
||||
"grid_auto_rows": null,
|
||||
"grid_column": null,
|
||||
"grid_gap": null,
|
||||
"grid_row": null,
|
||||
"grid_template_areas": null,
|
||||
"grid_template_columns": null,
|
||||
"grid_template_rows": null,
|
||||
"height": null,
|
||||
"justify_content": null,
|
||||
"justify_items": null,
|
||||
"left": null,
|
||||
"margin": null,
|
||||
"max_height": null,
|
||||
"max_width": null,
|
||||
"min_height": null,
|
||||
"min_width": null,
|
||||
"object_fit": null,
|
||||
"object_position": null,
|
||||
"order": null,
|
||||
"overflow": null,
|
||||
"overflow_x": null,
|
||||
"overflow_y": null,
|
||||
"padding": null,
|
||||
"right": null,
|
||||
"top": null,
|
||||
"visibility": null,
|
||||
"width": null
|
||||
}
|
||||
},
|
||||
"fb6478ce2dac489bb633b23ba0953c5c": {
|
||||
"model_module": "@jupyter-widgets/base",
|
||||
"model_module_version": "1.2.0",
|
||||
"model_name": "LayoutModel",
|
||||
"state": {
|
||||
"_model_module": "@jupyter-widgets/base",
|
||||
"_model_module_version": "1.2.0",
|
||||
"_model_name": "LayoutModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/base",
|
||||
"_view_module_version": "1.2.0",
|
||||
"_view_name": "LayoutView",
|
||||
"align_content": null,
|
||||
"align_items": null,
|
||||
"align_self": null,
|
||||
"border": null,
|
||||
"bottom": null,
|
||||
"display": null,
|
||||
"flex": null,
|
||||
"flex_flow": null,
|
||||
"grid_area": null,
|
||||
"grid_auto_columns": null,
|
||||
"grid_auto_flow": null,
|
||||
"grid_auto_rows": null,
|
||||
"grid_column": null,
|
||||
"grid_gap": null,
|
||||
"grid_row": null,
|
||||
"grid_template_areas": null,
|
||||
"grid_template_columns": null,
|
||||
"grid_template_rows": null,
|
||||
"height": null,
|
||||
"justify_content": null,
|
||||
"justify_items": null,
|
||||
"left": null,
|
||||
"margin": null,
|
||||
"max_height": null,
|
||||
"max_width": null,
|
||||
"min_height": null,
|
||||
"min_width": null,
|
||||
"object_fit": null,
|
||||
"object_position": null,
|
||||
"order": null,
|
||||
"overflow": null,
|
||||
"overflow_x": null,
|
||||
"overflow_y": null,
|
||||
"padding": null,
|
||||
"right": null,
|
||||
"top": null,
|
||||
"visibility": null,
|
||||
"width": null
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
||||
@@ -42,9 +42,9 @@
|
||||
")\n",
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain.prompts import StringPromptTemplate\n",
|
||||
"from langchain.schema import AgentAction, AgentFinish\n",
|
||||
"from langchain_community.agent_toolkits import NLAToolkit\n",
|
||||
"from langchain_community.tools.plugin import AIPlugin\n",
|
||||
"from langchain_core.agents import AgentAction, AgentFinish\n",
|
||||
"from langchain_openai import OpenAI"
|
||||
]
|
||||
},
|
||||
@@ -114,8 +114,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import Document\n",
|
||||
"from langchain_community.vectorstores import FAISS\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_openai import OpenAIEmbeddings"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -67,9 +67,9 @@
|
||||
")\n",
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain.prompts import StringPromptTemplate\n",
|
||||
"from langchain.schema import AgentAction, AgentFinish\n",
|
||||
"from langchain_community.agent_toolkits import NLAToolkit\n",
|
||||
"from langchain_community.tools.plugin import AIPlugin\n",
|
||||
"from langchain_core.agents import AgentAction, AgentFinish\n",
|
||||
"from langchain_openai import OpenAI"
|
||||
]
|
||||
},
|
||||
@@ -138,8 +138,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import Document\n",
|
||||
"from langchain_community.vectorstores import FAISS\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_openai import OpenAIEmbeddings"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -40,8 +40,8 @@
|
||||
")\n",
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain.prompts import StringPromptTemplate\n",
|
||||
"from langchain.schema import AgentAction, AgentFinish\n",
|
||||
"from langchain_community.utilities import SerpAPIWrapper\n",
|
||||
"from langchain_core.agents import AgentAction, AgentFinish\n",
|
||||
"from langchain_openai import OpenAI"
|
||||
]
|
||||
},
|
||||
@@ -103,8 +103,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import Document\n",
|
||||
"from langchain_community.vectorstores import FAISS\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_openai import OpenAIEmbeddings"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -72,7 +72,7 @@
|
||||
"source": [
|
||||
"from typing import Any, List, Tuple, Union\n",
|
||||
"\n",
|
||||
"from langchain.schema import AgentAction, AgentFinish\n",
|
||||
"from langchain_core.agents import AgentAction, AgentFinish\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class FakeAgent(BaseMultiActionAgent):\n",
|
||||
|
||||
@@ -73,8 +73,9 @@
|
||||
" AsyncCallbackManagerForRetrieverRun,\n",
|
||||
" CallbackManagerForRetrieverRun,\n",
|
||||
")\n",
|
||||
"from langchain.schema import BaseRetriever, Document\n",
|
||||
"from langchain_community.utilities import GoogleSerperAPIWrapper\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_core.retrievers import BaseRetriever\n",
|
||||
"from langchain_openai import ChatOpenAI, OpenAI"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -358,7 +358,7 @@
|
||||
"\n",
|
||||
"from langchain.chains.openai_functions import create_qa_with_structure_chain\n",
|
||||
"from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate\n",
|
||||
"from langchain.schema import HumanMessage, SystemMessage\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"from pydantic import BaseModel, Field"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -19,7 +19,9 @@
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"For this example, we will use Pinecone and some fake data"
|
||||
"For this example, we will use Pinecone and some fake data. To configure Pinecone, set the following environment variable:\n",
|
||||
"\n",
|
||||
"- `PINECONE_API_KEY`: Your Pinecone API key"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -29,11 +31,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import pinecone\n",
|
||||
"from langchain_community.vectorstores import Pinecone\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"pinecone.init(api_key=\"...\", environment=\"...\")"
|
||||
"from langchain_pinecone import PineconeVectorStore"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -64,7 +63,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"vectorstore = Pinecone.from_texts(\n",
|
||||
"vectorstore = PineconeVectorStore.from_texts(\n",
|
||||
" list(all_documents.values()), OpenAIEmbeddings(), index_name=\"rag-fusion\"\n",
|
||||
")"
|
||||
]
|
||||
@@ -162,7 +161,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"vectorstore = Pinecone.from_existing_index(\"rag-fusion\", OpenAIEmbeddings())\n",
|
||||
"vectorstore = PineconeVectorStore.from_existing_index(\"rag-fusion\", OpenAIEmbeddings())\n",
|
||||
"retriever = vectorstore.as_retriever()"
|
||||
]
|
||||
},
|
||||
|
||||
591
cookbook/rag_with_quantized_embeddings.ipynb
Normal file
@@ -0,0 +1,591 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6195da33-34c3-4ca2-943a-050b6dcbacbc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Embedding Documents using Optimized and Quantized Embedders\n",
|
||||
"\n",
|
||||
"In this tutorial, we will demo how to build a RAG pipeline, with the embedding for all documents done using Quantized Embedders.\n",
|
||||
"\n",
|
||||
"We will use a pipeline that will:\n",
|
||||
"\n",
|
||||
"* Create a document collection.\n",
|
||||
"* Embed all documents using Quantized Embedders.\n",
|
||||
"* Fetch relevant documents for our question.\n",
|
||||
"* Run an LLM answer the question.\n",
|
||||
"\n",
|
||||
"For more information about optimized models, we refer to [optimum-intel](https://github.com/huggingface/optimum-intel.git) and [IPEX](https://github.com/intel/intel-extension-for-pytorch).\n",
|
||||
"\n",
|
||||
"This tutorial is based on the [Langchain RAG tutorial here](https://towardsai.net/p/machine-learning/dense-x-retrieval-technique-in-langchain-and-llamaindex)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "26db2da5-3733-4a90-909e-6c11508ea140",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import uuid\n",
|
||||
"from pathlib import Path\n",
|
||||
"\n",
|
||||
"import langchain\n",
|
||||
"import torch\n",
|
||||
"from bs4 import BeautifulSoup as Soup\n",
|
||||
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
|
||||
"from langchain.storage import InMemoryByteStore, LocalFileStore\n",
|
||||
"\n",
|
||||
"# For our example, we'll load docs from the web\n",
|
||||
"from langchain.text_splitter import RecursiveCharacterTextSplitter # noqa\n",
|
||||
"from langchain_community.document_loaders.recursive_url_loader import (\n",
|
||||
" RecursiveUrlLoader,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# noqa\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"\n",
|
||||
"DOCSTORE_DIR = \".\"\n",
|
||||
"DOCSTORE_ID_KEY = \"doc_id\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f5ccda4e-7af5-4355-b9c4-25547edf33f9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Lets first load up this paper, and split into text chunks of size 1000."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "5f4d8888-53a6-49f5-a198-da5c92419ca4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Loaded 1 documents\n",
|
||||
"Split into 73 documents\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Could add more parsing here, as it's very raw.\n",
|
||||
"loader = RecursiveUrlLoader(\n",
|
||||
" \"https://ar5iv.labs.arxiv.org/html/1706.03762\",\n",
|
||||
" max_depth=2,\n",
|
||||
" extractor=lambda x: Soup(x, \"html.parser\").text,\n",
|
||||
")\n",
|
||||
"data = loader.load()\n",
|
||||
"print(f\"Loaded {len(data)} documents\")\n",
|
||||
"\n",
|
||||
"# Split\n",
|
||||
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
|
||||
"all_splits = text_splitter.split_documents(data)\n",
|
||||
"print(f\"Split into {len(all_splits)} documents\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "73e90632-2ac2-49eb-80da-ffe9ac4a278d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"In order to embed our documents, we can use the ```QuantizedBiEncoderEmbeddings```, for efficient and fast embedding. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "9a68a6f6-332d-481e-bbea-ad763155ea36",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "89af89b48c55409b9999b8e0387fab5b",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"config.json: 0%| | 0.00/747 [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "01ad1b6278194b53bf6a5a286a311864",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"pytorch_model.bin: 0%| | 0.00/45.9M [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "cb3bd1b88f7743c3b0322da3f021325c",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"inc_config.json: 0%| | 0.00/287 [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"loading configuration file inc_config.json from cache at \n",
|
||||
"INCConfig {\n",
|
||||
" \"distillation\": {},\n",
|
||||
" \"neural_compressor_version\": \"2.4.1\",\n",
|
||||
" \"optimum_version\": \"1.16.2\",\n",
|
||||
" \"pruning\": {},\n",
|
||||
" \"quantization\": {\n",
|
||||
" \"dataset_num_samples\": 50,\n",
|
||||
" \"is_static\": true\n",
|
||||
" },\n",
|
||||
" \"save_onnx_model\": false,\n",
|
||||
" \"torch_version\": \"2.2.0\",\n",
|
||||
" \"transformers_version\": \"4.37.2\"\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"Using `INCModel` to load a TorchScript model will be deprecated in v1.15.0, to load your model please use `IPEXModel` instead.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "7439315ebcb746f5be11fe30bc7693f6",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"tokenizer_config.json: 0%| | 0.00/1.24k [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "05265a3912254ce1ad43cc8086bcb0ca",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"vocab.txt: 0%| | 0.00/232k [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "a48f4245c60744f28f37cd3a7a24d198",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"tokenizer.json: 0%| | 0.00/711k [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "584a63cace934033b4ab22d3a178582a",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"special_tokens_map.json: 0%| | 0.00/125 [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.embeddings import QuantizedBiEncoderEmbeddings\n",
|
||||
"from langchain_core.embeddings import Embeddings\n",
|
||||
"\n",
|
||||
"model_name = \"Intel/bge-small-en-v1.5-rag-int8-static\"\n",
|
||||
"encode_kwargs = {\"normalize_embeddings\": True} # set True to compute cosine similarity\n",
|
||||
"\n",
|
||||
"model_inc = QuantizedBiEncoderEmbeddings(\n",
|
||||
" model_name=model_name,\n",
|
||||
" encode_kwargs=encode_kwargs,\n",
|
||||
" query_instruction=\"Represent this sentence for searching relevant passages: \",\n",
|
||||
")"
|
||||
]
|
||||
},
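As a quick sanity check, the quantized model exposes the standard LangChain `Embeddings` interface. A minimal sketch (the query string is hypothetical):

```python
# Sketch: embed one query and inspect the vector. bge-small class models
# produce 384-dimensional embeddings.
vec = model_inc.embed_query("What is attention?")
print(len(vec))
```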
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "360b2837-8024-47e0-a4ba-592505a9a5c8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"With our embedder in place, lets define our retriever:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "18bc0a73-1a13-4b2f-96ac-05a5313343b7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def get_multi_vector_retriever(\n",
|
||||
" docstore_id_key: str, collection_name: str, embedding_function: Embeddings\n",
|
||||
"):\n",
|
||||
" \"\"\"Create the composed retriever object.\"\"\"\n",
|
||||
" vectorstore = Chroma(\n",
|
||||
" collection_name=collection_name,\n",
|
||||
" embedding_function=embedding_function,\n",
|
||||
" )\n",
|
||||
" store = InMemoryByteStore()\n",
|
||||
"\n",
|
||||
" return MultiVectorRetriever(\n",
|
||||
" vectorstore=vectorstore,\n",
|
||||
" byte_store=store,\n",
|
||||
" id_key=docstore_id_key,\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"retriever = get_multi_vector_retriever(DOCSTORE_ID_KEY, \"multi_vec_store\", model_inc)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8484078e-1bf0-4080-a354-ef23823fd6dc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Next, we divide each chunk into sub-docs:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "e12f48d4-6562-416b-8f28-342912e5756e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"child_text_splitter = RecursiveCharacterTextSplitter(chunk_size=400)\n",
|
||||
"id_key = \"doc_id\"\n",
|
||||
"doc_ids = [str(uuid.uuid4()) for _ in all_splits]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "a268ef5f-91c2-4d8e-87f0-53db376e6a29",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"sub_docs = []\n",
|
||||
"for i, doc in enumerate(all_splits):\n",
|
||||
" _id = doc_ids[i]\n",
|
||||
" _sub_docs = child_text_splitter.split_documents([doc])\n",
|
||||
" for _doc in _sub_docs:\n",
|
||||
" _doc.metadata[id_key] = _id\n",
|
||||
" sub_docs.extend(_sub_docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d84ea8f4-a5de-4d76-b44d-85e56583f489",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Lets write our documents into our new store. This will use our embedder on each document."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"id": "1af831ce-0eae-44bc-aca7-4d691063640b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Batches: 100%|██████████| 8/8 [00:00<00:00, 9.05it/s]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"retriever.vectorstore.add_documents(sub_docs)\n",
|
||||
"retriever.docstore.mset(list(zip(doc_ids, all_splits)))"
|
||||
]
|
||||
},
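To see the multi-vector mechanics at work, compare a direct vectorstore search with a retriever call: the vectorstore returns one of the small 400-character sub-docs, while the retriever maps that hit back to its full-size parent chunk via the `doc_id` metadata. A minimal sketch (the query is hypothetical):

```python
# Sketch: child chunk vs. parent chunk for the same query.
query = "What is multi-head attention?"

sub_hit = retriever.vectorstore.similarity_search(query)[0]  # child (<= 400 chars)
parent_hit = retriever.get_relevant_documents(query)[0]      # parent (<= 1000 chars)

print(len(sub_hit.page_content), len(parent_hit.page_content))
```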
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "580bc212-8ecd-4d28-8656-b96fcd0d7eb6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Great! Our retriever is good to go. Lets load up an LLM, that will reason over the retrieved documents:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"id": "008c992f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": []
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "cbe70583ad964ae19582b72dab396784",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Loading checkpoint shards: 0%| | 0/2 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import torch\n",
|
||||
"from langchain.llms.huggingface_pipeline import HuggingFacePipeline\n",
|
||||
"from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline\n",
|
||||
"\n",
|
||||
"model_id = \"Intel/neural-chat-7b-v3-3\"\n",
|
||||
"tokenizer = AutoTokenizer.from_pretrained(model_id)\n",
|
||||
"model = AutoModelForCausalLM.from_pretrained(\n",
|
||||
" model_id, device_map=\"auto\", torch_dtype=torch.bfloat16\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"pipe = pipeline(\"text-generation\", model=model, tokenizer=tokenizer, max_new_tokens=100)\n",
|
||||
"\n",
|
||||
"hf = HuggingFacePipeline(pipeline=pipe)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6dd21fb2-0442-477d-aae2-9e7ee1d1d778",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Next, we will load up a prompt for answering questions using retrieved documents:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"id": "5e582509-caaf-4920-932c-4ce16162c789",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain import hub\n",
|
||||
"\n",
|
||||
"prompt = hub.pull(\"rlm/rag-prompt\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5cdfcba5-7ec7-4d0a-820e-4e200643a882",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can now build our pipeline:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"id": "b74d8dfb-72bb-46da-9df9-0dc47a3ac791",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema.runnable import RunnablePassthrough\n",
|
||||
"\n",
|
||||
"rag_chain = {\"context\": retriever, \"question\": RunnablePassthrough()} | prompt | hf"
|
||||
]
|
||||
},
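The dict on the left of the first `|` is LCEL shorthand for a `RunnableParallel`: the retriever fetches context while the raw question passes through unchanged, and both feed the prompt. A sketch of the explicit equivalent:

```python
# Sketch: the explicit form of the dict shorthand used above.
from langchain.schema.runnable import RunnableParallel, RunnablePassthrough

rag_chain = (
    RunnableParallel(context=retriever, question=RunnablePassthrough())
    | prompt
    | hf
)
```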
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3bc53602-86d6-420f-91b1-fc2effa7e986",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Excellent! lets ask it a question.\n",
|
||||
"We will also use a verbose and debug, to check which documents were used by the model to produce the answer."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 31,
|
||||
"id": "f0a92c07-53da-4e1f-b880-ee83a36ee17d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Setting `pad_token_id` to `eos_token_id`:2 for open-end generation.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:RunnableSequence] Entering Chain run with input:\n",
|
||||
"\u001b[0m{\n",
|
||||
" \"input\": \"What is the first transduction model relying entirely on self-attention?\"\n",
|
||||
"}\n",
|
||||
"\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:RunnableSequence > 2:chain:RunnableParallel<context,question>] Entering Chain run with input:\n",
|
||||
"\u001b[0m{\n",
|
||||
" \"input\": \"What is the first transduction model relying entirely on self-attention?\"\n",
|
||||
"}\n",
|
||||
"\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:RunnableSequence > 2:chain:RunnableParallel<context,question> > 4:chain:RunnablePassthrough] Entering Chain run with input:\n",
|
||||
"\u001b[0m{\n",
|
||||
" \"input\": \"What is the first transduction model relying entirely on self-attention?\"\n",
|
||||
"}\n",
|
||||
"\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[1:chain:RunnableSequence > 2:chain:RunnableParallel<context,question> > 4:chain:RunnablePassthrough] [1ms] Exiting Chain run with output:\n",
|
||||
"\u001b[0m{\n",
|
||||
" \"output\": \"What is the first transduction model relying entirely on self-attention?\"\n",
|
||||
"}\n",
|
||||
"\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[1:chain:RunnableSequence > 2:chain:RunnableParallel<context,question>] [66ms] Exiting Chain run with output:\n",
|
||||
"\u001b[0m[outputs]\n",
|
||||
"\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:RunnableSequence > 5:prompt:ChatPromptTemplate] Entering Prompt run with input:\n",
|
||||
"\u001b[0m[inputs]\n",
|
||||
"\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[1:chain:RunnableSequence > 5:prompt:ChatPromptTemplate] [1ms] Exiting Prompt run with output:\n",
|
||||
"\u001b[0m{\n",
|
||||
" \"lc\": 1,\n",
|
||||
" \"type\": \"constructor\",\n",
|
||||
" \"id\": [\n",
|
||||
" \"langchain\",\n",
|
||||
" \"prompts\",\n",
|
||||
" \"chat\",\n",
|
||||
" \"ChatPromptValue\"\n",
|
||||
" ],\n",
|
||||
" \"kwargs\": {\n",
|
||||
" \"messages\": [\n",
|
||||
" {\n",
|
||||
" \"lc\": 1,\n",
|
||||
" \"type\": \"constructor\",\n",
|
||||
" \"id\": [\n",
|
||||
" \"langchain\",\n",
|
||||
" \"schema\",\n",
|
||||
" \"messages\",\n",
|
||||
" \"HumanMessage\"\n",
|
||||
" ],\n",
|
||||
" \"kwargs\": {\n",
|
||||
" \"content\": \"You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.\\nQuestion: What is the first transduction model relying entirely on self-attention? \\nContext: [Document(page_content='To the best of our knowledge, however, the Transformer is the first transduction model relying entirely on self-attention to compute representations of its input and output without using sequence-aligned RNNs or convolution.\\\\nIn the following sections, we will describe the Transformer, motivate self-attention and discuss its advantages over models such as (neural_gpu, ; NalBytenet2017, ) and (JonasFaceNet2017, ).\\\\n\\\\n\\\\n\\\\n\\\\n3 Model Architecture\\\\n\\\\nFigure 1: The Transformer - model architecture.', metadata={'source': 'https://ar5iv.labs.arxiv.org/html/1706.03762', 'title': '[1706.03762] Attention Is All You Need', 'language': 'en'}), Document(page_content='In this work, we presented the Transformer, the first sequence transduction model based entirely on attention, replacing the recurrent layers most commonly used in encoder-decoder architectures with multi-headed self-attention.\\\\n\\\\n\\\\nFor translation tasks, the Transformer can be trained significantly faster than architectures based on recurrent or convolutional layers. On both WMT 2014 English-to-German and WMT 2014 English-to-French translation tasks, we achieve a new state of the art. In the former task our best model outperforms even all previously reported ensembles. \\\\n\\\\n\\\\nWe are excited about the future of attention-based models and plan to apply them to other tasks. We plan to extend the Transformer to problems involving input and output modalities other than text and to investigate local, restricted attention mechanisms to efficiently handle large inputs and outputs such as images, audio and video.\\\\nMaking generation less sequential is another research goals of ours.', metadata={'source': 'https://ar5iv.labs.arxiv.org/html/1706.03762', 'title': '[1706.03762] Attention Is All You Need', 'language': 'en'}), Document(page_content='Attention mechanisms have become an integral part of compelling sequence modeling and transduction models in various tasks, allowing modeling of dependencies without regard to their distance in the input or output sequences (bahdanau2014neural, ; structuredAttentionNetworks, ). In all but a few cases (decomposableAttnModel, ), however, such attention mechanisms are used in conjunction with a recurrent network.\\\\n\\\\n\\\\nIn this work we propose the Transformer, a model architecture eschewing recurrence and instead relying entirely on an attention mechanism to draw global dependencies between input and output. The Transformer allows for significantly more parallelization and can reach a new state of the art in translation quality after being trained for as little as twelve hours on eight P100 GPUs.\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n2 Background', metadata={'source': 'https://ar5iv.labs.arxiv.org/html/1706.03762', 'title': '[1706.03762] Attention Is All You Need', 'language': 'en'}), Document(page_content='The dominant sequence transduction models are based on complex recurrent or convolutional neural networks that include an encoder and a decoder. The best performing models also connect the encoder and decoder through an attention mechanism. 
We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles, by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the', metadata={'source': 'https://ar5iv.labs.arxiv.org/html/1706.03762', 'title': '[1706.03762] Attention Is All You Need', 'language': 'en'})] \\nAnswer:\",\n",
|
||||
" \"additional_kwargs\": {}\n",
|
||||
" }\n",
|
||||
" }\n",
|
||||
" ]\n",
|
||||
" }\n",
|
||||
"}\n",
|
||||
"\u001b[32;1m\u001b[1;3m[llm/start]\u001b[0m \u001b[1m[1:chain:RunnableSequence > 6:llm:HuggingFacePipeline] Entering LLM run with input:\n",
|
||||
"\u001b[0m{\n",
|
||||
" \"prompts\": [\n",
|
||||
" \"Human: You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.\\nQuestion: What is the first transduction model relying entirely on self-attention? \\nContext: [Document(page_content='To the best of our knowledge, however, the Transformer is the first transduction model relying entirely on self-attention to compute representations of its input and output without using sequence-aligned RNNs or convolution.\\\\nIn the following sections, we will describe the Transformer, motivate self-attention and discuss its advantages over models such as (neural_gpu, ; NalBytenet2017, ) and (JonasFaceNet2017, ).\\\\n\\\\n\\\\n\\\\n\\\\n3 Model Architecture\\\\n\\\\nFigure 1: The Transformer - model architecture.', metadata={'source': 'https://ar5iv.labs.arxiv.org/html/1706.03762', 'title': '[1706.03762] Attention Is All You Need', 'language': 'en'}), Document(page_content='In this work, we presented the Transformer, the first sequence transduction model based entirely on attention, replacing the recurrent layers most commonly used in encoder-decoder architectures with multi-headed self-attention.\\\\n\\\\n\\\\nFor translation tasks, the Transformer can be trained significantly faster than architectures based on recurrent or convolutional layers. On both WMT 2014 English-to-German and WMT 2014 English-to-French translation tasks, we achieve a new state of the art. In the former task our best model outperforms even all previously reported ensembles. \\\\n\\\\n\\\\nWe are excited about the future of attention-based models and plan to apply them to other tasks. We plan to extend the Transformer to problems involving input and output modalities other than text and to investigate local, restricted attention mechanisms to efficiently handle large inputs and outputs such as images, audio and video.\\\\nMaking generation less sequential is another research goals of ours.', metadata={'source': 'https://ar5iv.labs.arxiv.org/html/1706.03762', 'title': '[1706.03762] Attention Is All You Need', 'language': 'en'}), Document(page_content='Attention mechanisms have become an integral part of compelling sequence modeling and transduction models in various tasks, allowing modeling of dependencies without regard to their distance in the input or output sequences (bahdanau2014neural, ; structuredAttentionNetworks, ). In all but a few cases (decomposableAttnModel, ), however, such attention mechanisms are used in conjunction with a recurrent network.\\\\n\\\\n\\\\nIn this work we propose the Transformer, a model architecture eschewing recurrence and instead relying entirely on an attention mechanism to draw global dependencies between input and output. The Transformer allows for significantly more parallelization and can reach a new state of the art in translation quality after being trained for as little as twelve hours on eight P100 GPUs.\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n2 Background', metadata={'source': 'https://ar5iv.labs.arxiv.org/html/1706.03762', 'title': '[1706.03762] Attention Is All You Need', 'language': 'en'}), Document(page_content='The dominant sequence transduction models are based on complex recurrent or convolutional neural networks that include an encoder and a decoder. The best performing models also connect the encoder and decoder through an attention mechanism. 
We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles, by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the', metadata={'source': 'https://ar5iv.labs.arxiv.org/html/1706.03762', 'title': '[1706.03762] Attention Is All You Need', 'language': 'en'})] \\nAnswer:\"\n",
" ]\n",
"}\n",
"\u001b[36;1m\u001b[1;3m[llm/end]\u001b[0m \u001b[1m[1:chain:RunnableSequence > 6:llm:HuggingFacePipeline] [4.34s] Exiting LLM run with output:\n",
"\u001b[0m{\n",
" \"generations\": [\n",
" [\n",
" {\n",
" \"text\": \" The first transduction model relying entirely on self-attention is the Transformer.\",\n",
" \"generation_info\": null,\n",
" \"type\": \"Generation\"\n",
" }\n",
" ]\n",
" ],\n",
" \"llm_output\": null,\n",
" \"run\": null\n",
"}\n",
"\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[1:chain:RunnableSequence] [4.41s] Exiting Chain run with output:\n",
"\u001b[0m{\n",
" \"output\": \" The first transduction model relying entirely on self-attention is the Transformer.\"\n",
"}\n"
]
}
],
"source": [
"langchain.verbose = True\n",
"langchain.debug = True\n",
"\n",
"llm_res = rag_chain.invoke(\n",
" \"What is the first transduction model relying entirely on self-attention?\",\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 32,
"id": "023404a1-401a-46e1-8ab5-cafbc8593b04",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"' The first transduction model relying entirely on self-attention is the Transformer.'"
]
},
"execution_count": 32,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm_res"
]
},
{
"cell_type": "markdown",
"id": "0eaefd01-254a-445d-a95f-37889c126e0e",
"metadata": {},
"source": [
"Based on the retrieved documents, the answer is indeed correct :)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.18"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -51,10 +51,10 @@
"from langchain.chains.base import Chain\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.prompts.base import StringPromptTemplate\n",
"from langchain.schema import AgentAction, AgentFinish\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain_community.llms import BaseLLM\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_core.agents import AgentAction, AgentFinish\n",
"from langchain_openai import ChatOpenAI, OpenAI, OpenAIEmbeddings\n",
"from pydantic import BaseModel, Field"
]
@@ -401,7 +401,7 @@
")\n",
"from langchain.chains import LLMChain\n",
"from langchain.prompts import StringPromptTemplate\n",
"from langchain.schema import AgentAction, AgentFinish"
"from langchain_core.agents import AgentAction, AgentFinish"
]
},
{
12 docker/Makefile Normal file
@@ -0,0 +1,12 @@
# Makefile

build_graphdb:
	docker build --tag graphdb ./graphdb

start_graphdb:
	docker-compose up -d graphdb

down:
	docker-compose down -v --remove-orphans

.PHONY: build_graphdb start_graphdb down
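
A minimal usage sketch for these targets, run from the `docker/` directory (assuming Docker and docker-compose are available on the PATH):

```bash
make build_graphdb   # build the local graphdb image from ./graphdb
make start_graphdb   # start the graphdb service in the background
make down            # stop the services and remove volumes and orphans
```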
@@ -15,3 +15,7 @@ services:
      - "6020:6379"
    volumes:
      - ./redis-volume:/data
  graphdb:
    image: graphdb
    ports:
      - "6021:7200"

5 docker/graphdb/Dockerfile Normal file
@@ -0,0 +1,5 @@
FROM ontotext/graphdb:10.5.1
RUN mkdir -p /opt/graphdb/dist/data/repositories/langchain
COPY config.ttl /opt/graphdb/dist/data/repositories/langchain/
COPY graphdb_create.sh /run.sh
ENTRYPOINT bash /run.sh
46 docker/graphdb/config.ttl Normal file
@@ -0,0 +1,46 @@
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>.
@prefix rep: <http://www.openrdf.org/config/repository#>.
@prefix sr: <http://www.openrdf.org/config/repository/sail#>.
@prefix sail: <http://www.openrdf.org/config/sail#>.
@prefix graphdb: <http://www.ontotext.com/config/graphdb#>.

[] a rep:Repository ;
    rep:repositoryID "langchain" ;
    rdfs:label "" ;
    rep:repositoryImpl [
        rep:repositoryType "graphdb:SailRepository" ;
        sr:sailImpl [
            sail:sailType "graphdb:Sail" ;

            graphdb:read-only "false" ;

            # Inference and Validation
            graphdb:ruleset "empty" ;
            graphdb:disable-sameAs "true" ;
            graphdb:check-for-inconsistencies "false" ;

            # Indexing
            graphdb:entity-id-size "32" ;
            graphdb:enable-context-index "false" ;
            graphdb:enablePredicateList "true" ;
            graphdb:enable-fts-index "false" ;
            graphdb:fts-indexes ("default" "iri") ;
            graphdb:fts-string-literals-index "default" ;
            graphdb:fts-iris-index "none" ;

            # Queries and Updates
            graphdb:query-timeout "0" ;
            graphdb:throw-QueryEvaluationException-on-timeout "false" ;
            graphdb:query-limit-results "0" ;

            # Settable in the file but otherwise hidden in the UI and in the RDF4J console
            graphdb:base-URL "http://example.org/owlim#" ;
            graphdb:defaultNS "" ;
            graphdb:imports "" ;
            graphdb:repository-type "file-repository" ;
            graphdb:storage-folder "storage" ;
            graphdb:entity-index-size "10000000" ;
            graphdb:in-memory-literal-properties "true" ;
            graphdb:enable-literal-index "true" ;
        ]
    ].
28 docker/graphdb/graphdb_create.sh Normal file
@@ -0,0 +1,28 @@
#! /bin/bash
REPOSITORY_ID="langchain"
GRAPHDB_URI="http://localhost:7200/"

echo -e "\nUsing GraphDB: ${GRAPHDB_URI}"

function startGraphDB {
  echo -e "\nStarting GraphDB..."
  exec /opt/graphdb/dist/bin/graphdb
}

function waitGraphDBStart {
  echo -e "\nWaiting for GraphDB to start..."
  for _ in $(seq 1 5); do
    CHECK_RES=$(curl --silent --write-out '%{http_code}' --output /dev/null ${GRAPHDB_URI}/rest/repositories)
    if [ "${CHECK_RES}" = '200' ]; then
      echo -e "\nUp and running"
      break
    fi
    sleep 30s
    echo "CHECK_RES: ${CHECK_RES}"
  done
}

startGraphDB &
waitGraphDBStart
wait
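
Note that the script above only starts GraphDB and polls `${GRAPHDB_URI}/rest/repositories` until it responds; the `langchain` repository itself comes from the bundled `config.ttl`. A hypothetical sketch of that creation step via GraphDB's repository REST endpoint (an assumption on our part; the actual call is not shown in this diff):

```bash
# Assumption: GraphDB accepts a repository config upload as multipart form data.
curl -X POST "${GRAPHDB_URI}/rest/repositories" \
  -H 'Content-Type: multipart/form-data' \
  -F "config=@/opt/graphdb/dist/data/repositories/langchain/config.ttl"
```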
@@ -49,7 +49,7 @@ class ExampleLinksDirective(SphinxDirective):
        class_or_func_name = self.arguments[0]
        links = imported_classes.get(class_or_func_name, {})
        list_node = nodes.bullet_list()
        for doc_name, link in links.items():
        for doc_name, link in sorted(links.items()):
            item_node = nodes.list_item()
            para_node = nodes.paragraph()
            link_node = nodes.reference()
@@ -114,8 +114,8 @@ autodoc_pydantic_field_signature_prefix = "param"
autodoc_member_order = "groupwise"
autoclass_content = "both"
autodoc_typehints_format = "short"
autodoc_typehints = "both"

# autodoc_typehints = "description"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["templates"]

@@ -14,7 +14,6 @@ from pydantic import BaseModel
ROOT_DIR = Path(__file__).parents[2].absolute()
HERE = Path(__file__).parent

ClassKind = Literal["TypedDict", "Regular", "Pydantic", "enum"]

@@ -218,8 +217,8 @@ def _construct_doc(
    for module in namespaces:
        _members = members_by_namespace[module]
        classes = _members["classes_"]
        functions = _members["functions"]
        classes = [el for el in _members["classes_"] if el["is_public"]]
        functions = [el for el in _members["functions"] if el["is_public"]]
        if not (classes or functions):
            continue
        section = f":mod:`{package_namespace}.{module}`"
@@ -245,9 +244,6 @@ Classes
"""

    for class_ in sorted(classes, key=lambda c: c["qualified_name"]):
        if not class_["is_public"]:
            continue

        if class_["kind"] == "TypedDict":
            template = "typeddict.rst"
        elif class_["kind"] == "enum":
@@ -265,7 +261,7 @@ Classes
"""

    if functions:
        _functions = [f["qualified_name"] for f in functions if f["is_public"]]
        _functions = [f["qualified_name"] for f in functions]
        fstring = "\n ".join(sorted(_functions))
        full_doc += f"""\
Functions
@@ -323,30 +319,52 @@ def _package_dir(package_name: str = "langchain") -> Path:
def _get_package_version(package_dir: Path) -> str:
    with open(package_dir.parent / "pyproject.toml", "r") as f:
        pyproject = toml.load(f)
    """Return the version of the package."""
    try:
        with open(package_dir.parent / "pyproject.toml", "r") as f:
            pyproject = toml.load(f)
    except FileNotFoundError as e:
        print(
            f"pyproject.toml not found in {package_dir.parent}.\n"
            "You are either attempting to build a directory which is not a package or "
"the package is missing a pyproject.toml file which should be added."
            "Aborting the build."
        )
        exit(1)
    return pyproject["tool"]["poetry"]["version"]


def _out_file_path(package_name: str = "langchain") -> Path:
def _out_file_path(package_name: str) -> Path:
    """Return the path to the file containing the documentation."""
    return HERE / f"{package_name.replace('-', '_')}_api_reference.rst"


def _doc_first_line(package_name: str = "langchain") -> str:
def _doc_first_line(package_name: str) -> str:
"""Return the path to the file containing the documentation."""
    return f".. {package_name.replace('-', '_')}_api_reference:\n\n"


def main() -> None:
    """Generate the api_reference.rst file for each package."""
    print("Starting to build API reference files.")
    for dir in os.listdir(ROOT_DIR / "libs"):
        # Skip any hidden directories
        # Some of these could be present by mistake in the code base
        # e.g., .pytest_cache from running tests from the wrong location.
        if dir.startswith("."):
            print("Skipping dir:", dir)
            continue

        if dir in ("cli", "partners"):
            continue
        else:
            print("Building package:", dir)
            _build_rst_file(package_name=dir)
    for dir in os.listdir(ROOT_DIR / "libs" / "partners"):
    partner_packages = os.listdir(ROOT_DIR / "libs" / "partners")
    print("Building partner packages:", partner_packages)
    for dir in partner_packages:
        _build_rst_file(package_name=dir)
    print("API reference files built.")


if __name__ == "__main__":
@@ -5,7 +5,7 @@
<script type="text/javascript" src="{{ pathto('_static/doctools.js', 1) }}"></script>
<script type="text/javascript" src="{{ pathto('_static/language_data.js', 1) }}"></script>
<script type="text/javascript" src="{{ pathto('_static/searchtools.js', 1) }}"></script>
<!-- <script type="text/javascript" src="{{ pathto('_static/sphinx_highlight.js', 1) }}"></script> -->
<script type="text/javascript" src="{{ pathto('_static/sphinx_highlight.js', 1) }}"></script>
<script type="text/javascript">
  $(document).ready(function() {
    if (!Search.out) {
3094 docs/data/people.yml Normal file
File diff suppressed because it is too large
@@ -3,24 +3,68 @@ sidebar_position: 3
---
# Contribute Documentation

The docs directory contains Documentation and API Reference.
LangChain documentation consists of two components:

Documentation is built using [Quarto](https://quarto.org) and [Docusaurus 2](https://docusaurus.io/).
1. Main Documentation: Hosted at [python.langchain.com](https://python.langchain.com/),
this comprehensive resource serves as the primary user-facing documentation.
It covers a wide array of topics, including tutorials, use cases, integrations,
and more, offering extensive guidance on building with LangChain.
The content for this documentation lives in the `/docs` directory of the monorepo.
2. In-code Documentation: This is documentation of the codebase itself, which is also
used to generate the externally facing [API Reference](https://api.python.langchain.com/en/latest/langchain_api_reference.html).
The content for the API reference is autogenerated by scanning the docstrings in the codebase. For this reason we ask that
developers document their code well.

API Reference are largely autogenerated by [sphinx](https://www.sphinx-doc.org/en/master/) from the code and are hosted by [Read the Docs](https://readthedocs.org/).
For that reason, we ask that you add good documentation to all classes and methods.
The main documentation is built using [Quarto](https://quarto.org) and [Docusaurus 2](https://docusaurus.io/).

Similar to linting, we recognize documentation can be annoying. If you do not want to do it, please contact a project maintainer, and they can help you with it. We do not want this to be a blocker for good code getting contributed.
The `API Reference` is largely autogenerated by [sphinx](https://www.sphinx-doc.org/en/master/)
from the code and is hosted by [Read the Docs](https://readthedocs.org/).

## Build Documentation Locally
We appreciate all contributions to the documentation, whether it be fixing a typo,
adding a new tutorial or example and whether it be in the main documentation or the API Reference.

Similar to linting, we recognize documentation can be annoying. If you do not want
to do it, please contact a project maintainer, and they can help you with it. We do not want this to be a blocker for good code getting contributed.

## 📜 Main Documentation

The content for the main documentation is located in the `/docs` directory of the monorepo.

The documentation is written using a combination of ipython notebooks (`.ipynb` files)
and markdown (`.mdx` files). The notebooks are converted to markdown
using [Quarto](https://quarto.org) and then built using [Docusaurus 2](https://docusaurus.io/).
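
For instance, a single notebook can be rendered on its own with Quarto (a hypothetical direct invocation shown only for illustration; the docs Makefile wraps the real build steps):

```bash
quarto render docs/docs/expression_language/interface.ipynb --to markdown
```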

Feel free to make contributions to the main documentation! 🥰

After modifying the documentation:

1. Run the linting and formatting commands (see below) to ensure that the documentation is well-formatted and free of errors.
2. Optionally build the documentation locally to verify that the changes look good.
3. Make a pull request with the changes.
4. You can preview and verify that the changes are what you wanted by clicking the `View deployment` or `Visit Preview` buttons on the pull request `Conversation` page. This will take you to a preview of the documentation changes.

## ⚒️ Linting and Building Documentation Locally

After writing up the documentation, you may want to lint and build the documentation
locally to ensure that it looks good and is free of errors.

If you're unable to build it locally that's okay as well, as you will be able to
see a preview of the documentation on the pull request page.

### Install dependencies

- [Quarto](https://quarto.org) - package that converts Jupyter notebooks (`.ipynb` files) into mdx files for serving in Docusaurus.
- `poetry install` from the monorepo root
- [Quarto](https://quarto.org) - package that converts Jupyter notebooks (`.ipynb` files) into mdx files for serving in Docusaurus. [Download link](https://quarto.org/docs/download/).

From the **monorepo root**, run the following command to install the dependencies:

```bash
poetry install --with lint,docs --no-root
```

### Building

The code that builds the documentation is located in the `/docs` directory of the monorepo.

In the following commands, the prefix `api_` indicates that those are operations for the API Reference.

Before building the documentation, it is always a good idea to clean the build directory:
@@ -46,10 +90,9 @@ make api_docs_linkcheck

### Linting and Formatting

The docs are linted from the monorepo root. To lint the docs, run the following from there:
The Main Documentation is linted from the **monorepo root**. To lint the main documentation, run the following from there:

```bash
poetry install --with lint,typing
make lint
```

@@ -57,9 +100,73 @@ If you have formatting-related errors, you can fix them automatically with:

```bash
make format
```
```

## Verify Documentation changes
## ⌨️ In-code Documentation

The in-code documentation is largely autogenerated by [sphinx](https://www.sphinx-doc.org/en/master/) from the code and is hosted by [Read the Docs](https://readthedocs.org/).

For the API reference to be useful, the codebase must be well-documented. This means that all functions, classes, and methods should have a docstring that explains what they do, what the arguments are, and what the return value is. This is a good practice in general, but it is especially important for LangChain because the API reference is the primary resource for developers to understand how to use the codebase.

We generally follow the [Google Python Style Guide](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings) for docstrings.

Here is an example of a well-documented function:

```python

def my_function(arg1: int, arg2: str) -> float:
    """This is a short description of the function. (It should be a single sentence.)

    This is a longer description of the function. It should explain what
    the function does, what the arguments are, and what the return value is.
    It should wrap at 88 characters.

    Examples:
        This is a section for examples of how to use the function.

        .. code-block:: python

            my_function(1, "hello")

    Args:
        arg1: This is a description of arg1. We do not need to specify the type since
            it is already specified in the function signature.
        arg2: This is a description of arg2.

    Returns:
        This is a description of the return value.
    """
    return 3.14
```
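
Once docstrings are in place, you can check how they render by rebuilding the API reference locally. The target names below are assumptions based on the `api_` prefix convention mentioned above; see the Makefile in `/docs` for the exact ones:

```bash
make api_docs_clean api_docs_build
```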

### Linting and Formatting

The in-code documentation is linted from the directories belonging to the packages
being documented.

For example, if you're working on the `langchain-community` package, you would change
the working directory to the `langchain-community` directory:

```bash
cd [root]/libs/langchain-community
```

Set up a virtual environment for the package if you haven't done so already.

Install the dependencies for the package.

```bash
poetry install --with lint
```

Then you can run the following commands to lint and format the in-code documentation:

```bash
make format
make lint
```

## Verify Documentation Changes

After pushing documentation changes to the repository, you can preview and verify that the changes are
what you wanted by clicking the `View deployment` or `Visit Preview` buttons on the pull request `Conversation` page.

@@ -15,8 +15,9 @@ There are many ways to contribute to LangChain. Here are some common ways people
- [**Documentation**](./documentation.mdx): Help improve our docs, including this one!
- [**Code**](./code.mdx): Help us write code, fix bugs, or improve our infrastructure.
- [**Integrations**](integrations.mdx): Help us integrate with your favorite vendors and tools.
- [**Discussions**](https://github.com/langchain-ai/langchain/discussions): Help answer usage questions and discuss issues with users.

### 🚩GitHub Issues
### 🚩 GitHub Issues

Our [issues](https://github.com/langchain-ai/langchain/issues) page is kept up to date with bugs, improvements, and feature requests.

@@ -31,7 +32,13 @@ We will try to keep these issues as up-to-date as possible, though
with the rapid rate of development in this field some may get out of date.
If you notice this happening, please let us know.

### 🙋Getting Help
### 💭 GitHub Discussions

We have a [discussions](https://github.com/langchain-ai/langchain/discussions) page where users can ask usage questions, discuss design decisions, and propose new features.

If you are able to help answer questions, please do so! This will allow the maintainers to spend more time focused on development and bug fixing.

### 🙋 Getting Help

Our goal is to have the simplest developer setup possible. Should you experience any difficulty getting setup, please
contact a maintainer! Not only do we want to help get you unblocked, but we also want to make sure that the process is

54 docs/docs/contributing/repo_structure.mdx Normal file
@@ -0,0 +1,54 @@
---
sidebar_position: 0.5
---
# Repository Structure

If you plan on contributing to LangChain code or documentation, it can be useful
to understand the high level structure of the repository.
LangChain is organized as a [monorepo](https://en.wikipedia.org/wiki/Monorepo) that contains multiple packages.

Here's the structure visualized as a tree:

```text
.
├── cookbook # Tutorials and examples
├── docs # Contains content for the documentation here: https://python.langchain.com/
├── libs
│ ├── langchain # Main package
│ │ ├── tests/unit_tests # Unit tests (present in each package; not shown for brevity)
│ │ ├── tests/integration_tests # Integration tests (present in each package; not shown for brevity)
│ ├── langchain-community # Third-party integrations
│ ├── langchain-core # Base interfaces for key abstractions
│ ├── langchain-experimental # Experimental components and chains
│ ├── partners
│ ├── langchain-partner-1
│ ├── langchain-partner-2
│ ├── ...
│
├── templates # A collection of easily deployable reference architectures for a wide variety of tasks.
```

The root directory also contains the following files:

* `pyproject.toml`: Dependencies for building and linting the docs and cookbook.
* `Makefile`: A file that contains shortcuts for building and linting the docs and cookbook.
There are other files at the root level, but their presence should be self-explanatory. Feel free to browse around!

## Documentation

The `/docs` directory contains the content for the documentation that is shown
at https://python.langchain.com/ and the associated API Reference https://api.python.langchain.com/en/latest/langchain_api_reference.html.

See the [documentation](./documentation) guidelines to learn how to contribute to the documentation.

## Code

The `/libs` directory contains the code for the LangChain packages.

To learn more about how to contribute code see the following guidelines:

- [Code](./code.mdx): Learn how to develop in the LangChain codebase.
- [Integrations](./integrations.mdx): Learn how to contribute third-party integrations to langchain-community or start a new partner package.
- [Testing](./testing.mdx): Learn how to write tests for the packages.
@@ -7,7 +7,7 @@
"source": [
"# Agents\n",
"\n",
"You can pass a Runnable into an agent."
"You can pass a Runnable into an agent. Make sure you have `langchainhub` installed: `pip install langchainhub`"
]
},
{
@@ -98,7 +98,7 @@
"source": [
"Building an agent from a runnable usually involves a few things:\n",
"\n",
"1. Data processing for the intermediate steps. These need to represented in a way that the language model can recognize them. This should be pretty tightly coupled to the instructions in the prompt\n",
"1. Data processing for the intermediate steps. These need to be represented in a way that the language model can recognize them. This should be pretty tightly coupled to the instructions in the prompt\n",
"\n",
"2. The prompt itself\n",
"\n",
@@ -47,7 +47,7 @@
"source": [
"from operator import itemgetter\n",
"\n",
"from langchain.schema import StrOutputParser\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
@@ -169,8 +169,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema import format_document\n",
"from langchain_core.messages import AIMessage, HumanMessage, get_buffer_string\n",
"from langchain_core.prompts import format_document\n",
"from langchain_core.runnables import RunnableParallel"
]
},
@@ -29,7 +29,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema import StrOutputParser\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"from langchain_openai import ChatOpenAI"
@@ -7,7 +7,7 @@
"source": [
"# Add message history (memory)\n",
"\n",
"The `RunnableWithMessageHistory` let us add message history to certain types of chains.\n",
"The `RunnableWithMessageHistory` lets us add message history to certain types of chains. It wraps another Runnable and manages the chat message history for it.\n",
"\n",
"Specifically, it can be used for any Runnable that takes as input one of\n",
"\n",
@@ -21,7 +21,379 @@
"* a sequence of `BaseMessage`\n",
"* a dict with a key that contains a sequence of `BaseMessage`\n",
"\n",
"Let's take a look at some examples to see how it works."
"Let's take a look at some examples to see how it works. First we construct a runnable (which here accepts a dict as input and returns a message as output):"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "2ed413b4-33a1-48ee-89b0-2d4917ec101a",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain_openai.chat_models import ChatOpenAI\n",
"\n",
"model = ChatOpenAI()\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\n",
" \"system\",\n",
" \"You're an assistant who's good at {ability}. Respond in 20 words or fewer\",\n",
" ),\n",
" MessagesPlaceholder(variable_name=\"history\"),\n",
" (\"human\", \"{input}\"),\n",
" ]\n",
")\n",
"runnable = prompt | model"
]
},
{
"cell_type": "markdown",
"id": "9fd175e1-c7b8-4929-a57e-3331865fe7aa",
"metadata": {},
"source": [
"To manage the message history, we will need:\n",
"1. This runnable;\n",
"2. A callable that returns an instance of `BaseChatMessageHistory`.\n",
"\n",
"Check out the [memory integrations](https://integrations.langchain.com/memory) page for implementations of chat message histories using Redis and other providers. Here we demonstrate using an in-memory `ChatMessageHistory` as well as more persistent storage using `RedisChatMessageHistory`."
]
},
{
"cell_type": "markdown",
"id": "3d83adad-9672-496d-9f25-5747e7b8c8bb",
"metadata": {},
"source": [
"## In-memory\n",
"\n",
"Below we show a simple example in which the chat history lives in memory, in this case via a global Python dict.\n",
"\n",
"We construct a callable `get_session_history` that references this dict to return an instance of `ChatMessageHistory`. The arguments to the callable can be specified by passing a configuration to the `RunnableWithMessageHistory` at runtime. By default, the configuration parameter is expected to be a single string `session_id`. This can be adjusted via the `history_factory_config` kwarg.\n",
"\n",
"Using the single-parameter default:"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "54348d02-d8ee-440c-bbf9-41bc0fbbc46c",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.chat_message_histories import ChatMessageHistory\n",
"from langchain_core.chat_history import BaseChatMessageHistory\n",
"from langchain_core.runnables.history import RunnableWithMessageHistory\n",
"\n",
"store = {}\n",
"\n",
"\n",
"def get_session_history(session_id: str) -> BaseChatMessageHistory:\n",
"    if session_id not in store:\n",
"        store[session_id] = ChatMessageHistory()\n",
"    return store[session_id]\n",
"\n",
"\n",
"with_message_history = RunnableWithMessageHistory(\n",
"    runnable,\n",
"    get_session_history,\n",
"    input_messages_key=\"input\",\n",
"    history_messages_key=\"history\",\n",
")"
]
},
{
"cell_type": "markdown",
"id": "01acb505-3fd3-4ab4-9f04-5ea07e81542e",
"metadata": {},
"source": [
"Note that we've specified `input_messages_key` (the key to be treated as the latest input message) and `history_messages_key` (the key to add historical messages to).\n",
"\n",
"When invoking this new runnable, we specify the corresponding chat history via a configuration parameter:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "01384412-f08e-4634-9edb-3f46f475b582",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Cosine is a trigonometric function that calculates the ratio of the adjacent side to the hypotenuse of a right triangle.')"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"with_message_history.invoke(\n",
"    {\"ability\": \"math\", \"input\": \"What does cosine mean?\"},\n",
"    config={\"configurable\": {\"session_id\": \"abc123\"}},\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "954688a2-9a3f-47ee-a9e8-fa0c83e69477",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Cosine is a mathematical function used to calculate the length of a side in a right triangle.')"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Remembers\n",
"with_message_history.invoke(\n",
"    {\"ability\": \"math\", \"input\": \"What?\"},\n",
"    config={\"configurable\": {\"session_id\": \"abc123\"}},\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "39350d7c-2641-4744-bc2a-fd6a57c4ea90",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='I can help with math problems. What do you need assistance with?')"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# New session_id --> does not remember.\n",
"with_message_history.invoke(\n",
"    {\"ability\": \"math\", \"input\": \"What?\"},\n",
"    config={\"configurable\": {\"session_id\": \"def234\"}},\n",
")"
]
},
{
"cell_type": "markdown",
"id": "d29497be-3366-408d-bbb9-d4a8bf4ef37c",
"metadata": {},
"source": [
"The configuration parameters by which we track message histories can be customized by passing in a list of ``ConfigurableFieldSpec`` objects to the ``history_factory_config`` parameter. Below, we use two parameters: a `user_id` and `conversation_id`."
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "1c89daee-deff-4fdf-86a3-178f7d8ef536",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.runnables import ConfigurableFieldSpec\n",
"\n",
"store = {}\n",
"\n",
"\n",
"def get_session_history(user_id: str, conversation_id: str) -> BaseChatMessageHistory:\n",
"    if (user_id, conversation_id) not in store:\n",
"        store[(user_id, conversation_id)] = ChatMessageHistory()\n",
"    return store[(user_id, conversation_id)]\n",
"\n",
"\n",
"with_message_history = RunnableWithMessageHistory(\n",
"    runnable,\n",
"    get_session_history,\n",
"    input_messages_key=\"input\",\n",
"    history_messages_key=\"history\",\n",
"    history_factory_config=[\n",
"        ConfigurableFieldSpec(\n",
"            id=\"user_id\",\n",
"            annotation=str,\n",
"            name=\"User ID\",\n",
"            description=\"Unique identifier for the user.\",\n",
"            default=\"\",\n",
"            is_shared=True,\n",
"        ),\n",
"        ConfigurableFieldSpec(\n",
"            id=\"conversation_id\",\n",
"            annotation=str,\n",
"            name=\"Conversation ID\",\n",
"            description=\"Unique identifier for the conversation.\",\n",
"            default=\"\",\n",
"            is_shared=True,\n",
"        ),\n",
"    ],\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "65c5622e-09b8-4f2f-8c8a-2dab0fd040fa",
"metadata": {},
"outputs": [],
"source": [
"with_message_history.invoke(\n",
"    {\"ability\": \"math\", \"input\": \"Hello\"},\n",
"    config={\"configurable\": {\"user_id\": \"123\", \"conversation_id\": \"1\"}},\n",
")"
]
},
{
"cell_type": "markdown",
"id": "18f1a459-3f88-4ee6-8542-76a907070dd6",
"metadata": {},
"source": [
"### Examples with runnables of different signatures\n",
"\n",
"The above runnable takes a dict as input and returns a BaseMessage. Below we show some alternatives."
]
},
{
"cell_type": "markdown",
"id": "48eae1bf-b59d-4a61-8e62-b6dbf667e866",
"metadata": {},
"source": [
"#### Messages input, dict output"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "17733d4f-3a32-4055-9d44-5d58b9446a26",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'output_message': AIMessage(content=\"Simone de Beauvoir believed in the existence of free will. She argued that individuals have the ability to make choices and determine their own actions, even in the face of social and cultural constraints. She rejected the idea that individuals are purely products of their environment or predetermined by biology or destiny. Instead, she emphasized the importance of personal responsibility and the need for individuals to actively engage in creating their own lives and defining their own existence. De Beauvoir believed that freedom and agency come from recognizing one's own freedom and actively exercising it in the pursuit of personal and collective liberation.\")}"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.messages import HumanMessage\n",
"from langchain_core.runnables import RunnableParallel\n",
"\n",
"chain = RunnableParallel({\"output_message\": ChatOpenAI()})\n",
"\n",
"\n",
"def get_session_history(session_id: str) -> BaseChatMessageHistory:\n",
"    if session_id not in store:\n",
"        store[session_id] = ChatMessageHistory()\n",
"    return store[session_id]\n",
"\n",
"\n",
"with_message_history = RunnableWithMessageHistory(\n",
"    chain,\n",
"    get_session_history,\n",
"    output_messages_key=\"output_message\",\n",
")\n",
"\n",
"with_message_history.invoke(\n",
"    [HumanMessage(content=\"What did Simone de Beauvoir believe about free will\")],\n",
"    config={\"configurable\": {\"session_id\": \"baz\"}},\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "efb57ef5-91f9-426b-84b9-b77f071a9dd7",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'output_message': AIMessage(content='Simone de Beauvoir\\'s views on free will were closely aligned with those of her contemporary and partner Jean-Paul Sartre. Both de Beauvoir and Sartre were existentialist philosophers who emphasized the importance of individual freedom and the rejection of determinism. They believed that human beings have the capacity to transcend their circumstances and create their own meaning and values.\\n\\nSartre, in his famous work \"Being and Nothingness,\" argued that human beings are condemned to be free, meaning that we are burdened with the responsibility of making choices and defining ourselves in a world that lacks inherent meaning. Like de Beauvoir, Sartre believed that individuals have the ability to exercise their freedom and make choices in the face of external and internal constraints.\\n\\nWhile there may be some nuanced differences in their philosophical writings, overall, de Beauvoir and Sartre shared a similar belief in the existence of free will and the importance of individual agency in shaping one\\'s own life.')}"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"with_message_history.invoke(\n",
"    [HumanMessage(content=\"How did this compare to Sartre\")],\n",
"    config={\"configurable\": {\"session_id\": \"baz\"}},\n",
")"
]
},
{
"cell_type": "markdown",
"id": "a39eac5f-a9d8-4729-be06-5e7faf0c424d",
"metadata": {},
"source": [
"#### Messages input, messages output"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e45bcd95-e31f-4a9a-967a-78f96e8da881",
"metadata": {},
"outputs": [],
"source": [
"RunnableWithMessageHistory(\n",
"    ChatOpenAI(),\n",
"    get_session_history,\n",
")"
]
},
{
"cell_type": "markdown",
"id": "04daa921-a2d1-40f9-8cd1-ae4e9a4163a7",
"metadata": {},
"source": [
"#### Dict with single key for all messages input, messages output"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "27157f15-9fb0-4167-9870-f4d7f234b3cb",
"metadata": {},
"outputs": [],
"source": [
"from operator import itemgetter\n",
"\n",
"RunnableWithMessageHistory(\n",
"    itemgetter(\"input_messages\") | ChatOpenAI(),\n",
"    get_session_history,\n",
"    input_messages_key=\"input_messages\",\n",
")"
]
},
{
"cell_type": "markdown",
"id": "418ca7af-9ed9-478c-8bca-cba0de2ca61e",
"metadata": {},
"source": [
"## Persistent storage"
]
},
{
"cell_type": "markdown",
"id": "76799a13-d99a-4c4f-91f2-db699e40b8df",
"metadata": {},
"source": [
"In many cases it is preferable to persist conversation histories. `RunnableWithMessageHistory` is agnostic as to how the `get_session_history` callable retrieves its chat message histories. See [here](https://github.com/langchain-ai/langserve/blob/main/examples/chat_with_persistence_and_user/server.py) for an example using a local filesystem. Below we demonstrate how one could use Redis. Check out the [memory integrations](https://integrations.langchain.com/memory) page for implementations of chat message histories using other providers."
]
},
{
@@ -29,9 +401,9 @@
"id": "6bca45e5-35d9-4603-9ca9-6ac0ce0e35cd",
"metadata": {},
"source": [
"## Setup\n",
"### Setup\n",
"\n",
"We'll use Redis to store our chat message histories and Anthropic's claude-2 model so we'll need to install the following dependencies:"
"We'll need to install Redis if it's not installed already:"
]
},
{
@@ -41,28 +413,7 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain redis anthropic"
]
},
{
"cell_type": "markdown",
"id": "93776323-d6b8-4912-bb6a-867c5e655f46",
"metadata": {},
"source": [
"Set your [Anthropic API key](https://console.anthropic.com/):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c7f56f69-d2f1-4a21-990c-b5551eb012fa",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass()"
"%pip install --upgrade --quiet redis"
]
},
{
@@ -78,7 +429,7 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 9,
"id": "cd6a250e-17fe-4368-a39d-1fe6b2cbde68",
"metadata": {},
"outputs": [],
@@ -110,77 +461,32 @@
"# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "markdown",
"id": "1a5a632e-ba9e-4488-b586-640ad5494f62",
"metadata": {},
"source": [
"## Example: Dict input, message output\n",
"\n",
"Let's create a simple chain that takes a dict as input and returns a BaseMessage.\n",
"\n",
"In this case the `\"question\"` key in the input represents our input message, and the `\"history\"` key is where our historical messages will be injected."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "2a150d6f-8878-4950-8634-a608c5faad56",
"metadata": {},
"outputs": [],
"source": [
"from typing import Optional\n",
"\n",
"from langchain_community.chat_message_histories import RedisChatMessageHistory\n",
"from langchain_community.chat_models import ChatAnthropic\n",
"from langchain_core.chat_history import BaseChatMessageHistory\n",
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain_core.runnables.history import RunnableWithMessageHistory"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "3185edba-4eb6-4b32-80c6-577c0d19af97",
"metadata": {},
"outputs": [],
"source": [
"prompt = ChatPromptTemplate.from_messages(\n",
"    [\n",
"        (\"system\", \"You're an assistant who's good at {ability}\"),\n",
"        MessagesPlaceholder(variable_name=\"history\"),\n",
"        (\"human\", \"{question}\"),\n",
"    ]\n",
")\n",
"\n",
"chain = prompt | ChatAnthropic(model=\"claude-2\")"
]
},
{
"cell_type": "markdown",
"id": "f9d81796-ce61-484c-89e2-6c567d5e54ef",
"metadata": {},
"source": [
"### Adding message history\n",
"\n",
"To add message history to our original chain we wrap it in the `RunnableWithMessageHistory` class.\n",
"\n",
"Crucially, we also need to define a method that takes a session_id string and based on it returns a `BaseChatMessageHistory`. Given the same input, this method should return an equivalent output.\n",
"\n",
"In this case we'll also want to specify `input_messages_key` (the key to be treated as the latest input message) and `history_messages_key` (the key to add historical messages to)."
"Updating the message history implementation just requires us to define a new callable, this time returning an instance of `RedisChatMessageHistory`:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 10,
"id": "ca7c64d8-e138-4ef8-9734-f82076c47d80",
"metadata": {},
"outputs": [],
"source": [
"chain_with_history = RunnableWithMessageHistory(\n",
"    chain,\n",
"    lambda session_id: RedisChatMessageHistory(session_id, url=REDIS_URL),\n",
"    input_messages_key=\"question\",\n",
"from langchain_community.chat_message_histories import RedisChatMessageHistory\n",
"\n",
"\n",
"def get_message_history(session_id: str) -> RedisChatMessageHistory:\n",
"    return RedisChatMessageHistory(session_id, url=REDIS_URL)\n",
"\n",
"\n",
"with_message_history = RunnableWithMessageHistory(\n",
"    runnable,\n",
"    get_message_history,\n",
"    input_messages_key=\"input\",\n",
"    history_messages_key=\"history\",\n",
")"
]
@@ -190,60 +496,53 @@
"id": "37eefdec-9901-4650-b64c-d3c097ed5f4d",
"metadata": {},
"source": [
"## Invoking with config\n",
"\n",
"Whenever we call our chain with message history, we need to include a config that contains the `session_id`\n",
"```python\n",
"config={\"configurable\": {\"session_id\": \"<SESSION_ID>\"}}\n",
"```\n",
"\n",
"Given the same configuration, our chain should be pulling from the same chat message history."
"We can invoke as before:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 11,
"id": "a85bcc22-ca4c-4ad5-9440-f94be7318f3e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=' Cosine is one of the basic trigonometric functions in mathematics. It is defined as the ratio of the adjacent side to the hypotenuse in a right triangle.\\n\\nSome key properties and facts about cosine:\\n\\n- It is denoted by cos(θ), where θ is the angle in a right triangle. \\n\\n- The cosine of an acute angle is always positive. For angles greater than 90 degrees, cosine can be negative.\\n\\n- Cosine is one of the three main trig functions along with sine and tangent.\\n\\n- The cosine of 0 degrees is 1. As the angle increases towards 90 degrees, the cosine value decreases towards 0.\\n\\n- The range of values for cosine is -1 to 1.\\n\\n- The cosine function maps angles in a circle to the x-coordinate on the unit circle.\\n\\n- Cosine is used to find adjacent side lengths in right triangles, and has many other applications in mathematics, physics, engineering and more.\\n\\n- Key cosine identities include: cos(A+B) = cosAcosB − sinAsinB and cos(2A) = cos^2(A) − sin^2(A)\\n\\nSo in summary, cosine is a fundamental trig')"
"AIMessage(content='Cosine is a trigonometric function that represents the ratio of the adjacent side to the hypotenuse in a right triangle.')"
]
},
"execution_count": 7,
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain_with_history.invoke(\n",
"    {\"ability\": \"math\", \"question\": \"What does cosine mean?\"},\n",
"with_message_history.invoke(\n",
"    {\"ability\": \"math\", \"input\": \"What does cosine mean?\"},\n",
"    config={\"configurable\": {\"session_id\": \"foobar\"}},\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 12,
"id": "ab29abd3-751f-41ce-a1b0-53f6b565e79d",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=' The inverse of the cosine function is called the arccosine or inverse cosine, often denoted as cos-1(x) or arccos(x).\\n\\nThe key properties and facts about arccosine:\\n\\n- It is defined as the angle θ between 0 and π radians whose cosine is x. So arccos(x) = θ such that cos(θ) = x.\\n\\n- The range of arccosine is 0 to π radians (0 to 180 degrees).\\n\\n- The domain of arccosine is -1 to 1. \\n\\n- arccos(cos(θ)) = θ for values of θ from 0 to π radians.\\n\\n- arccos(x) is the angle in a right triangle whose adjacent side is x and hypotenuse is 1.\\n\\n- arccos(0) = 90 degrees. As x increases from 0 to 1, arccos(x) decreases from 90 to 0 degrees.\\n\\n- arccos(1) = 0 degrees. arccos(-1) = 180 degrees.\\n\\n- The graph of y = arccos(x) is part of the unit circle, restricted to x')"
"AIMessage(content='The inverse of cosine is the arccosine function, denoted as acos or cos^-1, which gives the angle corresponding to a given cosine value.')"
]
},
"execution_count": 8,
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain_with_history.invoke(\n",
"    {\"ability\": \"math\", \"question\": \"What's its inverse\"},\n",
"with_message_history.invoke(\n",
"    {\"ability\": \"math\", \"input\": \"What's its inverse\"},\n",
"    config={\"configurable\": {\"session_id\": \"foobar\"}},\n",
")"
]
@@ -255,7 +554,7 @@
"source": [
":::tip\n",
"\n",
"[Langsmith trace](https://smith.langchain.com/public/863a003b-7ca8-4b24-be9e-d63ec13c106e/r)\n",
"[Langsmith trace](https://smith.langchain.com/public/bd73e122-6ec1-48b2-82df-e6483dc9cb63/r)\n",
"\n",
":::"
]
@@ -267,124 +566,13 @@
|
||||
"source": [
|
||||
"Looking at the Langsmith trace for the second call, we can see that when constructing the prompt, a \"history\" variable has been injected which is a list of two messages (our first input and first output)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "028cf151-6cd5-4533-b3cf-c8d735554647",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Example: messages input, dict output"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "0bb446b5-6251-45fe-a92a-4c6171473c53",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'output_message': AIMessage(content=' Here is a summary of Simone de Beauvoir\\'s views on free will:\\n\\n- De Beauvoir was an existentialist philosopher and believed strongly in the concept of free will. She rejected the idea that human nature or instincts determine behavior.\\n\\n- Instead, de Beauvoir argued that human beings define their own essence or nature through their actions and choices. As she famously wrote, \"One is not born, but rather becomes, a woman.\"\\n\\n- De Beauvoir believed that while individuals are situated in certain cultural contexts and social conditions, they still have agency and the ability to transcend these situations. Freedom comes from choosing one\\'s attitude toward these constraints.\\n\\n- She emphasized the radical freedom and responsibility of the individual. We are \"condemned to be free\" because we cannot escape making choices and taking responsibility for our choices. \\n\\n- De Beauvoir felt that many people evade their freedom and responsibility by adopting rigid mindsets, ideologies, or conforming uncritically to social roles.\\n\\n- She advocated for the recognition of ambiguity in the human condition and warned against the quest for absolute rules that deny freedom and responsibility. Authentic living involves embracing ambiguity.\\n\\nIn summary, de Beauvoir promoted an existential ethics')}"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"from langchain_core.runnables import RunnableParallel\n",
|
||||
"\n",
|
||||
"chain = RunnableParallel({\"output_message\": ChatAnthropic(model=\"claude-2\")})\n",
|
||||
"chain_with_history = RunnableWithMessageHistory(\n",
|
||||
" chain,\n",
|
||||
" lambda session_id: RedisChatMessageHistory(session_id, url=REDIS_URL),\n",
|
||||
" output_messages_key=\"output_message\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain_with_history.invoke(\n",
|
||||
" [HumanMessage(content=\"What did Simone de Beauvoir believe about free will\")],\n",
|
||||
" config={\"configurable\": {\"session_id\": \"baz\"}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "601ce3ff-aea8-424d-8e54-fd614256af4f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'output_message': AIMessage(content=\" There are many similarities between Simone de Beauvoir's views on free will and those of Jean-Paul Sartre, though some key differences emerge as well:\\n\\nSimilarities with Sartre:\\n\\n- Both were existentialist thinkers who rejected determinism and emphasized human freedom and responsibility.\\n\\n- They agreed that existence precedes essence - there is no predefined human nature that determines who we are.\\n\\n- Individuals must define themselves through their choices and actions. This leads to anxiety but also freedom.\\n\\n- The human condition is characterized by ambiguity and uncertainty, rather than fixed meanings/values.\\n\\n- Both felt that most people evade their freedom through self-deception, conformity, or adopting collective identities/values uncritically.\\n\\nDifferences from Sartre: \\n\\n- Sartre placed more emphasis on the burden and anguish of radical freedom. De Beauvoir focused more on its positive potential.\\n\\n- De Beauvoir critiqued Sartre's premise that human relations are necessarily conflictual. She saw more potential for mutual recognition.\\n\\n- Sartre saw the Other's gaze as a threat to freedom. De Beauvoir put more stress on how the Other's gaze can confirm\")}"
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain_with_history.invoke(\n",
|
||||
" [HumanMessage(content=\"How did this compare to Sartre\")],\n",
|
||||
" config={\"configurable\": {\"session_id\": \"baz\"}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b898d1b1-11e6-4d30-a8dd-cc5e45533611",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
":::tip\n",
|
||||
"\n",
|
||||
"[LangSmith trace](https://smith.langchain.com/public/f6c3e1d1-a49d-4955-a9fa-c6519df74fa7/r)\n",
|
||||
"\n",
|
||||
":::"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1724292c-01c6-44bb-83e8-9cdb6bf01483",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## More examples\n",
|
||||
"\n",
|
||||
"We could also do any of the below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "fd89240b-5a25-48f8-9568-5c1127f9ffad",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from operator import itemgetter\n",
|
||||
"\n",
|
||||
"# messages in, messages out\n",
|
||||
"RunnableWithMessageHistory(\n",
|
||||
" ChatAnthropic(model=\"claude-2\"),\n",
|
||||
" lambda session_id: RedisChatMessageHistory(session_id, url=REDIS_URL),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# dict with single key for all messages in, messages out\n",
|
||||
"RunnableWithMessageHistory(\n",
|
||||
" itemgetter(\"input_messages\") | ChatAnthropic(model=\"claude-2\"),\n",
|
||||
" lambda session_id: RedisChatMessageHistory(session_id, url=REDIS_URL),\n",
|
||||
" input_messages_key=\"input_messages\",\n",
|
||||
")"
|
||||
]
|
||||
}
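A minimal sketch of one more variant, assuming the `REDIS_URL`, `ChatAnthropic`, `RedisChatMessageHistory`, and `RunnableWithMessageHistory` names from the cells above (the key names `"input_messages"` and `"output_message"` are illustrative, not required):

```python
from operator import itemgetter

from langchain_core.runnables import RunnableParallel

# dict with single key in, dict with single key out -- a sketch combining
# input_messages_key and output_messages_key from the two examples above
RunnableWithMessageHistory(
    RunnableParallel(
        {"output_message": itemgetter("input_messages") | ChatAnthropic(model="claude-2")}
    ),
    lambda session_id: RedisChatMessageHistory(session_id, url=REDIS_URL),
    input_messages_key="input_messages",
    output_messages_key="output_message",
)
```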
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv",
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "poetry-venv"
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
@@ -396,7 +584,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.10.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -68,7 +68,7 @@
|
||||
"source": [
|
||||
"# Showing the example using anthropic, but you can use\n",
|
||||
"# your favorite chat model!\n",
|
||||
"from langchain.chat_models import ChatAnthropic\n",
|
||||
"from langchain_community.chat_models import ChatAnthropic\n",
|
||||
"\n",
|
||||
"model = ChatAnthropic()\n",
|
||||
"\n",
|
||||
|
||||
@@ -193,7 +193,7 @@ After that, we can import and use WebBaseLoader.
|
||||
|
||||
```python
|
||||
from langchain_community.document_loaders import WebBaseLoader
|
||||
loader = WebBaseLoader("https://docs.smith.langchain.com/overview")
|
||||
loader = WebBaseLoader("https://docs.smith.langchain.com")
|
||||
|
||||
docs = loader.load()
|
||||
```
|
||||
@@ -374,7 +374,7 @@ The final thing we will create is an agent - where the LLM decides what steps to
|
||||
**NOTE: for this example we will only show how to create an agent using OpenAI models, as local models are not reliable enough yet.**
|
||||
|
||||
One of the first things to do when building an agent is to decide what tools it should have access to.
|
||||
For this example, we will give the agent access two tools:
|
||||
For this example, we will give the agent access to two tools:
|
||||
|
||||
1. The retriever we just created. This will let it easily answer questions about LangSmith
|
||||
2. A search tool. This will let it easily answer questions that require up-to-date information. (A sketch of wiring these two tools up follows below.)
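A rough sketch of that wiring, assuming the `retriever` built earlier in this guide and a Tavily-backed search tool (the tool name and description strings are illustrative):

```python
from langchain.tools.retriever import create_retriever_tool
from langchain_community.tools.tavily_search import TavilySearchResults

# Expose the retriever as a tool the agent can call; the name and
# description below are illustrative, not prescribed.
retriever_tool = create_retriever_tool(
    retriever,
    "langsmith_search",
    "Search for information about LangSmith.",
)

# A web search tool for questions that need up-to-date information
# (assumes a TAVILY_API_KEY environment variable is set).
search = TavilySearchResults()

tools = [retriever_tool, search]
```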
|
||||
|
||||
@@ -35,7 +35,7 @@
|
||||
"\n",
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain.evaluation import AgentTrajectoryEvaluator\n",
|
||||
"from langchain.schema import AgentAction\n",
|
||||
"from langchain_core.agents import AgentAction\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"\n",
|
||||
|
||||
@@ -90,7 +90,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import Document\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"\n",
|
||||
"documents = [Document(page_content=document_content)]"
|
||||
]
|
||||
@@ -879,7 +879,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts.prompt import PromptTemplate\n",
|
||||
"from langchain.schema import format_document\n",
|
||||
"from langchain_core.prompts import format_document\n",
|
||||
"\n",
|
||||
"DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template=\"{page_content}\")\n",
|
||||
"\n",
|
||||
|
||||
@@ -242,7 +242,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.callbacks import LabelStudioCallbackHandler\n",
|
||||
"from langchain.schema import HumanMessage, SystemMessage\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"chat_llm = ChatOpenAI(\n",
|
||||
|
||||
@@ -53,7 +53,7 @@ Example:
|
||||
|
||||
```python
|
||||
from langchain_openai import ChatOpenAI
|
||||
from langchain.schema import SystemMessage, HumanMessage
|
||||
from langchain_core.messages import SystemMessage, HumanMessage
|
||||
from langchain.agents import OpenAIFunctionsAgent, AgentExecutor, tool
|
||||
from langchain.callbacks import LLMonitorCallbackHandler
|
||||
|
||||
|
||||
@@ -267,7 +267,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.callbacks import TrubricsCallbackHandler\n",
|
||||
"from langchain.schema import HumanMessage, SystemMessage\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
|
||||
141
docs/docs/integrations/chat/ai21.ipynb
Normal file
@@ -0,0 +1,141 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "4cebeec0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: AI21 Labs\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e49f1e0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ChatAI21\n",
|
||||
"\n",
|
||||
"This notebook covers how to get started with AI21 chat models.\n",
|
||||
"\n",
|
||||
"## Installation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4c3bef91",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-02-15T06:50:44.929635Z",
|
||||
"start_time": "2024-02-15T06:50:41.209704Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install -qU langchain-ai21"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b4f3e15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Environment Setup\n",
|
||||
"\n",
|
||||
"We'll need to get a [AI21 API key](https://docs.ai21.com/) and set the `AI21_API_KEY` environment variable:\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"os.environ[\"AI21_API_KEY\"] = getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4828829d3da430ce",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"source": [
|
||||
"## Usage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "39353473fce5dd2e",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Bonjour, comment vas-tu?')"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_ai21 import ChatAI21\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"chat = ChatAI21(model=\"j2-ultra\")\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", \"You are a helpful assistant that translates English to French.\"),\n",
|
||||
" (\"human\", \"Translate this sentence from English to French. {english_text}.\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | chat\n",
|
||||
"chain.invoke({\"english_text\": \"Hello, how are you?\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c159a79f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -15,16 +15,7 @@
|
||||
"execution_count": 1,
|
||||
"id": "378be79b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/Users/harrisonchase/.pyenv/versions/3.9.1/envs/langchain/lib/python3.9/site-packages/deeplake/util/check_latest_version.py:32: UserWarning: A newer version of deeplake (3.6.14) is available. It's recommended that you update to the latest version using `pip install -U deeplake`.\n",
|
||||
" warnings.warn(\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_experimental.llms.anthropic_functions import AnthropicFunctions"
|
||||
]
|
||||
@@ -41,7 +32,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": null,
|
||||
"id": "e1d535f6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -92,7 +83,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import HumanMessage"
|
||||
"from langchain_core.messages import HumanMessage"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -102,7 +93,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"response = model.predict_messages(\n",
|
||||
"response = model.invoke(\n",
|
||||
" [HumanMessage(content=\"whats the weater in boston?\")], functions=functions\n",
|
||||
")"
|
||||
]
|
||||
@@ -140,7 +131,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 4,
|
||||
"id": "7af5c567",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -162,7 +153,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": null,
|
||||
"id": "bd01082a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -172,24 +163,12 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": null,
|
||||
"id": "b5a23e9f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'Alex', 'height': '5', 'hair_color': 'blonde'},\n",
|
||||
" {'name': 'Claudia', 'height': '6', 'hair_color': 'brunette'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain.run(inp)"
|
||||
"chain.invoke(inp)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -256,7 +235,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.run(\"this is really cool\")"
|
||||
"chain.invoke(\"this is really cool\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -276,7 +255,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.9.0"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -109,7 +109,7 @@
|
||||
"source": [
|
||||
"import asyncio\n",
|
||||
"\n",
|
||||
"from langchain.schema import HumanMessage, SystemMessage\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" SystemMessage(content=\"You are a helpful AI that shares everything you know.\"),\n",
|
||||
|
||||
@@ -31,7 +31,7 @@
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain.schema import HumanMessage\n",
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"from langchain_openai import AzureChatOpenAI"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -74,11 +74,11 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import HumanMessage\n",
|
||||
"from langchain_community.chat_models.azureml_endpoint import (\n",
|
||||
" AzureMLEndpointApiType,\n",
|
||||
" LlamaChatContentFormatter,\n",
|
||||
")"
|
||||
")\n",
|
||||
"from langchain_core.messages import HumanMessage"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -105,8 +105,8 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.schema import HumanMessage\n",
|
||||
"from langchain_community.chat_models.azureml_endpoint import LlamaContentFormatter\n",
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"\n",
|
||||
"chat = AzureMLChatOnlineEndpoint(\n",
|
||||
" endpoint_url=\"https://<your-endpoint>.<your_region>.inference.ml.azure.com/score\",\n",
|
||||
|
||||
@@ -29,8 +29,8 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import HumanMessage\n",
|
||||
"from langchain_community.chat_models import ChatBaichuan"
|
||||
"from langchain_community.chat_models import ChatBaichuan\n",
|
||||
"from langchain_core.messages import HumanMessage"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -47,8 +47,8 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import HumanMessage\n",
|
||||
"from langchain_community.chat_models import BedrockChat"
|
||||
"from langchain_community.chat_models import BedrockChat\n",
|
||||
"from langchain_core.messages import HumanMessage"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -68,8 +68,8 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatDeepInfra\n",
|
||||
"from langchain.schema import HumanMessage"
|
||||
"from langchain_community.chat_models import ChatDeepInfra\n",
|
||||
"from langchain_core.messages import HumanMessage"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -216,7 +216,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -76,8 +76,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import HumanMessage\n",
|
||||
"from langchain_community.chat_models import ErnieBotChat\n",
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"\n",
|
||||
"chat = ErnieBotChat(\n",
|
||||
" ernie_client_id=\"YOUR_CLIENT_ID\", ernie_client_secret=\"YOUR_CLIENT_SECRET\"\n",
|
||||
|
||||
@@ -73,8 +73,8 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.schema import HumanMessage, SystemMessage\n",
|
||||
"from langchain_community.chat_models import ChatEverlyAI\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" SystemMessage(content=\"You are a helpful AI that shares everything you know.\"),\n",
|
||||
@@ -127,8 +127,8 @@
|
||||
],
|
||||
"source": [
|
||||
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
|
||||
"from langchain.schema import HumanMessage, SystemMessage\n",
|
||||
"from langchain_community.chat_models import ChatEverlyAI\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" SystemMessage(content=\"You are a humorous AI that delights people.\"),\n",
|
||||
@@ -185,8 +185,8 @@
|
||||
],
|
||||
"source": [
|
||||
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
|
||||
"from langchain.schema import HumanMessage, SystemMessage\n",
|
||||
"from langchain_community.chat_models import ChatEverlyAI\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" SystemMessage(content=\"You are a humorous AI that delights people.\"),\n",
|
||||
|
||||
@@ -37,8 +37,8 @@
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain.schema import HumanMessage, SystemMessage\n",
|
||||
"from langchain_community.chat_models.fireworks import ChatFireworks"
|
||||
"from langchain_community.chat_models.fireworks import ChatFireworks\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -75,7 +75,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.schema import HumanMessage, SystemMessage\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" SystemMessage(\n",
|
||||
|
||||
@@ -320,20 +320,51 @@
|
||||
"4. Message may be blocked if they violate the safety checks of the LLM. In this case, the model will return an empty response."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "54793b9e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Safety Settings\n",
|
||||
"\n",
|
||||
"Gemini models have default safety settings that can be overridden. If you are receiving lots of \"Safety Warnings\" from your models, you can try tweaking the `safety_settings` attribute of the model. For example, to turn off safety blocking for dangerous content, you can construct your LLM as follows:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "75fdfad6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
"source": [
|
||||
"from langchain_google_genai import (\n",
|
||||
" ChatGoogleGenerativeAI,\n",
|
||||
" HarmBlockThreshold,\n",
|
||||
" HarmCategory,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"llm = ChatGoogleGenerativeAI(\n",
|
||||
" model=\"gemini-pro\",\n",
|
||||
" safety_settings={\n",
|
||||
" HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,\n",
|
||||
" },\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e68e203d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For an enumeration of the categories and thresholds available, see Google's [safety setting types](https://ai.google.dev/api/python/google/generativeai/types/SafetySettingDict)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "92b5aca5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Additional Configuraation\n",
|
||||
"## Additional Configuration\n",
|
||||
"\n",
|
||||
"You can pass the following parameters to ChatGoogleGenerativeAI in order to customize the SDK's behavior:\n",
|
||||
"\n",
|
||||
|
||||
@@ -70,9 +70,9 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import HumanMessage\n",
|
||||
"from langchain_community.chat_models import GPTRouter\n",
|
||||
"from langchain_community.chat_models.gpt_router import GPTRouterModel"
|
||||
"from langchain_community.chat_models.gpt_router import GPTRouterModel\n",
|
||||
"from langchain_core.messages import HumanMessage"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
181
docs/docs/integrations/chat/groq.ipynb
Normal file
@@ -0,0 +1,181 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Groq\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Groq\n",
|
||||
"\n",
|
||||
"Install the langchain-groq package if not already installed:\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"pip install langchain-groq\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Request an [API key](https://wow.groq.com) and set it as an environment variable:\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"export GROQ_API_KEY=<YOUR API KEY>\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Alternatively, you may configure the API key when you initialize ChatGroq."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Import the ChatGroq class and initialize it with a model:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_groq import ChatGroq"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 28,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat = ChatGroq(temperature=0, model_name=\"mixtral-8x7b-32768\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can view the available models [here](https://console.groq.com/docs/models).\n",
|
||||
"\n",
|
||||
"If you do not want to set your API key in the environment, you can pass it directly to the client:\n",
|
||||
"```python\n",
|
||||
"chat = ChatGroq(temperature=0, groq_api_key=\"YOUR_API_KEY\", model_name=\"mixtral-8x7b-32768\")\n",
|
||||
"\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Write a prompt and invoke ChatGroq to create completions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 29,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Low Latency Large Language Models (LLMs) are a type of artificial intelligence model that can understand and generate human-like text. The term \"low latency\" refers to the model\\'s ability to process and respond to inputs quickly, with minimal delay.\\n\\nThe importance of low latency in LLMs can be explained through the following points:\\n\\n1. Improved user experience: In real-time applications such as chatbots, virtual assistants, and interactive games, users expect quick and responsive interactions. Low latency LLMs can provide instant feedback and responses, creating a more seamless and engaging user experience.\\n\\n2. Better decision-making: In time-sensitive scenarios, such as financial trading or autonomous vehicles, low latency LLMs can quickly process and analyze vast amounts of data, enabling faster and more informed decision-making.\\n\\n3. Enhanced accessibility: For individuals with disabilities, low latency LLMs can help create more responsive and inclusive interfaces, such as voice-controlled assistants or real-time captioning systems.\\n\\n4. Competitive advantage: In industries where real-time data analysis and decision-making are crucial, low latency LLMs can provide a competitive edge by enabling businesses to react more quickly to market changes, customer needs, or emerging opportunities.\\n\\n5. Scalability: Low latency LLMs can efficiently handle a higher volume of requests and interactions, making them more suitable for large-scale applications and services.\\n\\nIn summary, low latency is an essential aspect of LLMs, as it significantly impacts user experience, decision-making, accessibility, competitiveness, and scalability. By minimizing delays and response times, low latency LLMs can unlock new possibilities and applications for artificial intelligence in various industries and scenarios.')"
|
||||
]
|
||||
},
|
||||
"execution_count": 29,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"system = \"You are a helpful assistant.\"\n",
|
||||
"human = \"{text}\"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages([(\"system\", system), (\"human\", human)])\n",
|
||||
"\n",
|
||||
"chain = prompt | chat\n",
|
||||
"chain.invoke({\n",
|
||||
" \"text\": \"Explain the importance of low latency LLMs.\"\n",
|
||||
"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## `ChatGroq` also supports async and streaming functionality:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 32,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"There's a star that shines up in the sky,\\nThe Sun, that makes the day bright and spry.\\nIt rises and sets,\\nIn a daily, predictable bet,\\nGiving life to the world, oh my!\")"
|
||||
]
|
||||
},
|
||||
"execution_count": 32,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat = ChatGroq(temperature=0, model_name=\"mixtral-8x7b-32768\")\n",
|
||||
"prompt = ChatPromptTemplate.from_messages([(\"human\", \"Write a Limerick about {topic}\")])\n",
|
||||
"chain = prompt | chat\n",
|
||||
"await chain.ainvoke({\"topic\": \"The Sun\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 33,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"The moon's gentle glow\n",
|
||||
"Illuminates the night sky\n",
|
||||
"Peaceful and serene"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat = ChatGroq(temperature=0, model_name=\"llama2-70b-4096\")\n",
|
||||
"prompt = ChatPromptTemplate.from_messages([(\"human\", \"Write a haiku about {topic}\")])\n",
|
||||
"chain = prompt | chat\n",
|
||||
"for chunk in chain.stream({\"topic\": \"The Moon\"}):\n",
|
||||
" print(chunk.content, end=\"\", flush=True)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -24,8 +24,8 @@
|
||||
" HumanMessagePromptTemplate,\n",
|
||||
" SystemMessagePromptTemplate,\n",
|
||||
")\n",
|
||||
"from langchain.schema import HumanMessage, SystemMessage\n",
|
||||
"from langchain_community.chat_models import JinaChat"
|
||||
"from langchain_community.chat_models import JinaChat\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
654
docs/docs/integrations/chat/kinetica.ipynb
Normal file
@@ -0,0 +1,654 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Kinetica\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Kinetica SqlAssist LLM Demo\n",
|
||||
"\n",
|
||||
"This notebook demonstrates how to use Kinetica to transform natural language into SQL\n",
|
||||
"and simplify the process of data retrieval. This demo is intended to show the mechanics\n",
|
||||
"of creating and using a chain as opposed to the capabilities of the LLM.\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"With the Kinetica LLM workflow you create an LLM context in the database that provides\n",
|
||||
"information needed for infefencing that includes tables, annotations, rules, and\n",
|
||||
"samples. Invoking ``ChatKinetica.load_messages_from_context()`` will retrieve the\n",
|
||||
"context information from the database so that it can be used to create a chat prompt.\n",
|
||||
"\n",
|
||||
"The chat prompt consists of a ``SystemMessage`` and pairs of\n",
|
||||
"``HumanMessage``/``AIMessage`` that contain the samples which are question/SQL\n",
|
||||
"pairs. You can append pairs samples to this list but it is not intended to\n",
|
||||
"facilitate a typical natural language conversation.\n",
|
||||
"\n",
|
||||
"When you create a chain from the chat prompt and execute it, the Kinetica LLM will\n",
|
||||
"generate SQL from the input. Optionally you can use ``KineticaSqlOutputParser`` to\n",
|
||||
"execute the SQL and return the result as a dataframe.\n",
|
||||
"\n",
|
||||
"Currently, 2 LLM's are supported for SQL generation: \n",
|
||||
"\n",
|
||||
"1. **Kinetica SQL-GPT**: This LLM is based on OpenAI ChatGPT API.\n",
|
||||
"2. **Kinetica SqlAssist**: This LLM is purpose built to integrate with the Kinetica\n",
|
||||
" database and it can run in a secure customer premise.\n",
|
||||
"\n",
|
||||
"For this demo we will be using **SqlAssist**. See the [Kinetica Documentation\n",
|
||||
"site](https://docs.kinetica.com/7.1/sql-gpt/concepts/) for more information.\n",
|
||||
"\n",
|
||||
"## Prerequisites\n",
|
||||
"\n",
|
||||
"To get started you will need a Kinetica DB instance. If you don't have one you can\n",
|
||||
"obtain a [free development instance](https://cloud.kinetica.com/trynow).\n",
|
||||
"\n",
|
||||
"You will need to install the following packages..."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n",
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Install Langchain community and core packages\n",
|
||||
"%pip install --upgrade --quiet langchain-core langchain-community\n",
|
||||
"\n",
|
||||
"# Install Kineitca DB connection package\n",
|
||||
"%pip install --upgrade --quiet gpudb typeguard\n",
|
||||
"\n",
|
||||
"# Install packages needed for this tutorial\n",
|
||||
"%pip install --upgrade --quiet faker"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Database Connection\n",
|
||||
"\n",
|
||||
"You must set the database connection in the following environment variables. If you are using a virtual environment you can set them in the `.env` file of the project:\n",
|
||||
"* `KINETICA_URL`: Database connection URL\n",
|
||||
"* `KINETICA_USER`: Database user\n",
|
||||
"* `KINETICA_PASSWD`: Secure password.\n",
|
||||
"\n",
|
||||
"If you can create an instance of `KineticaChatLLM` then you are successfully connected."
|
||||
]
|
||||
},
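For example, a minimal sketch of setting these variables directly in Python instead of a `.env` file (the values below are placeholders for your own instance details):

```python
import os

# Placeholder connection details -- substitute your own Kinetica instance.
os.environ["KINETICA_URL"] = "https://your-instance.cloud.kinetica.com/your-cluster/gpudb-0"
os.environ["KINETICA_USER"] = "your_user"
os.environ["KINETICA_PASSWD"] = "your_password"
```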
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.chat_models.kinetica import ChatKinetica\n",
|
||||
"\n",
|
||||
"kinetica_llm = ChatKinetica()\n",
|
||||
"\n",
|
||||
"# Test table we will create\n",
|
||||
"table_name = \"demo.user_profiles\"\n",
|
||||
"\n",
|
||||
"# LLM Context we will create\n",
|
||||
"kinetica_ctx = \"demo.test_llm_ctx\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create test data\n",
|
||||
"\n",
|
||||
"Before we can generate SQL we will need to create a Kinetica table and an LLM context that can inference the table.\n",
|
||||
"\n",
|
||||
"### Create some fake user profiles\n",
|
||||
"\n",
|
||||
"We will use the `faker` package to create a dataframe with 100 fake profiles."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/html": [
|
||||
"<div>\n",
|
||||
"<style scoped>\n",
|
||||
" .dataframe tbody tr th:only-of-type {\n",
|
||||
" vertical-align: middle;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .dataframe tbody tr th {\n",
|
||||
" vertical-align: top;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .dataframe thead th {\n",
|
||||
" text-align: right;\n",
|
||||
" }\n",
|
||||
"</style>\n",
|
||||
"<table border=\"1\" class=\"dataframe\">\n",
|
||||
" <thead>\n",
|
||||
" <tr style=\"text-align: right;\">\n",
|
||||
" <th></th>\n",
|
||||
" <th>username</th>\n",
|
||||
" <th>name</th>\n",
|
||||
" <th>sex</th>\n",
|
||||
" <th>address</th>\n",
|
||||
" <th>mail</th>\n",
|
||||
" <th>birthdate</th>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>id</th>\n",
|
||||
" <th></th>\n",
|
||||
" <th></th>\n",
|
||||
" <th></th>\n",
|
||||
" <th></th>\n",
|
||||
" <th></th>\n",
|
||||
" <th></th>\n",
|
||||
" </tr>\n",
|
||||
" </thead>\n",
|
||||
" <tbody>\n",
|
||||
" <tr>\n",
|
||||
" <th>0</th>\n",
|
||||
" <td>eduardo69</td>\n",
|
||||
" <td>Haley Beck</td>\n",
|
||||
" <td>F</td>\n",
|
||||
" <td>59836 Carla Causeway Suite 939\\nPort Eugene, I...</td>\n",
|
||||
" <td>meltondenise@yahoo.com</td>\n",
|
||||
" <td>1997-09-09</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>1</th>\n",
|
||||
" <td>lbarrera</td>\n",
|
||||
" <td>Joshua Stephens</td>\n",
|
||||
" <td>M</td>\n",
|
||||
" <td>3108 Christina Forges\\nPort Timothychester, KY...</td>\n",
|
||||
" <td>erica80@hotmail.com</td>\n",
|
||||
" <td>1924-05-05</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>2</th>\n",
|
||||
" <td>bburton</td>\n",
|
||||
" <td>Paula Kaiser</td>\n",
|
||||
" <td>F</td>\n",
|
||||
" <td>Unit 7405 Box 3052\\nDPO AE 09858</td>\n",
|
||||
" <td>timothypotts@gmail.com</td>\n",
|
||||
" <td>1933-09-06</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>3</th>\n",
|
||||
" <td>melissa49</td>\n",
|
||||
" <td>Wendy Reese</td>\n",
|
||||
" <td>F</td>\n",
|
||||
" <td>6408 Christopher Hill Apt. 459\\nNew Benjamin, ...</td>\n",
|
||||
" <td>dadams@gmail.com</td>\n",
|
||||
" <td>1988-07-28</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>4</th>\n",
|
||||
" <td>melissacarter</td>\n",
|
||||
" <td>Manuel Rios</td>\n",
|
||||
" <td>M</td>\n",
|
||||
" <td>2241 Bell Gardens Suite 723\\nScottside, CA 38463</td>\n",
|
||||
" <td>williamayala@gmail.com</td>\n",
|
||||
" <td>1930-12-19</td>\n",
|
||||
" </tr>\n",
|
||||
" </tbody>\n",
|
||||
"</table>\n",
|
||||
"</div>"
|
||||
],
|
||||
"text/plain": [
|
||||
" username name sex \\\n",
|
||||
"id \n",
|
||||
"0 eduardo69 Haley Beck F \n",
|
||||
"1 lbarrera Joshua Stephens M \n",
|
||||
"2 bburton Paula Kaiser F \n",
|
||||
"3 melissa49 Wendy Reese F \n",
|
||||
"4 melissacarter Manuel Rios M \n",
|
||||
"\n",
|
||||
" address mail \\\n",
|
||||
"id \n",
|
||||
"0 59836 Carla Causeway Suite 939\\nPort Eugene, I... meltondenise@yahoo.com \n",
|
||||
"1 3108 Christina Forges\\nPort Timothychester, KY... erica80@hotmail.com \n",
|
||||
"2 Unit 7405 Box 3052\\nDPO AE 09858 timothypotts@gmail.com \n",
|
||||
"3 6408 Christopher Hill Apt. 459\\nNew Benjamin, ... dadams@gmail.com \n",
|
||||
"4 2241 Bell Gardens Suite 723\\nScottside, CA 38463 williamayala@gmail.com \n",
|
||||
"\n",
|
||||
" birthdate \n",
|
||||
"id \n",
|
||||
"0 1997-09-09 \n",
|
||||
"1 1924-05-05 \n",
|
||||
"2 1933-09-06 \n",
|
||||
"3 1988-07-28 \n",
|
||||
"4 1930-12-19 "
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from typing import Generator\n",
|
||||
"\n",
|
||||
"import pandas as pd\n",
|
||||
"from faker import Faker\n",
|
||||
"\n",
|
||||
"Faker.seed(5467)\n",
|
||||
"faker = Faker(locale=\"en-US\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def profile_gen(count: int) -> Generator:\n",
|
||||
" for id in range(0, count):\n",
|
||||
" rec = dict(id=id, **faker.simple_profile())\n",
|
||||
" rec[\"birthdate\"] = pd.Timestamp(rec[\"birthdate\"])\n",
|
||||
" yield rec\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"load_df = pd.DataFrame.from_records(data=profile_gen(100), index=\"id\")\n",
|
||||
"load_df.head()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Create a Kinetica table from the Dataframe"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/html": [
|
||||
"<div>\n",
|
||||
"<style scoped>\n",
|
||||
" .dataframe tbody tr th:only-of-type {\n",
|
||||
" vertical-align: middle;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .dataframe tbody tr th {\n",
|
||||
" vertical-align: top;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .dataframe thead th {\n",
|
||||
" text-align: right;\n",
|
||||
" }\n",
|
||||
"</style>\n",
|
||||
"<table border=\"1\" class=\"dataframe\">\n",
|
||||
" <thead>\n",
|
||||
" <tr style=\"text-align: right;\">\n",
|
||||
" <th></th>\n",
|
||||
" <th>name</th>\n",
|
||||
" <th>type</th>\n",
|
||||
" <th>properties</th>\n",
|
||||
" </tr>\n",
|
||||
" </thead>\n",
|
||||
" <tbody>\n",
|
||||
" <tr>\n",
|
||||
" <th>0</th>\n",
|
||||
" <td>username</td>\n",
|
||||
" <td>string</td>\n",
|
||||
" <td>[char32]</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>1</th>\n",
|
||||
" <td>name</td>\n",
|
||||
" <td>string</td>\n",
|
||||
" <td>[char32]</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>2</th>\n",
|
||||
" <td>sex</td>\n",
|
||||
" <td>string</td>\n",
|
||||
" <td>[char1]</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>3</th>\n",
|
||||
" <td>address</td>\n",
|
||||
" <td>string</td>\n",
|
||||
" <td>[char64]</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>4</th>\n",
|
||||
" <td>mail</td>\n",
|
||||
" <td>string</td>\n",
|
||||
" <td>[char32]</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>5</th>\n",
|
||||
" <td>birthdate</td>\n",
|
||||
" <td>long</td>\n",
|
||||
" <td>[timestamp]</td>\n",
|
||||
" </tr>\n",
|
||||
" </tbody>\n",
|
||||
"</table>\n",
|
||||
"</div>"
|
||||
],
|
||||
"text/plain": [
|
||||
" name type properties\n",
|
||||
"0 username string [char32]\n",
|
||||
"1 name string [char32]\n",
|
||||
"2 sex string [char1]\n",
|
||||
"3 address string [char64]\n",
|
||||
"4 mail string [char32]\n",
|
||||
"5 birthdate long [timestamp]"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from gpudb import GPUdbTable\n",
|
||||
"\n",
|
||||
"gpudb_table = GPUdbTable.from_df(\n",
|
||||
" load_df,\n",
|
||||
" db=kinetica_llm.kdbc,\n",
|
||||
" table_name=table_name,\n",
|
||||
" clear_table=True,\n",
|
||||
" load_data=True,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# See the Kinetica column types\n",
|
||||
"gpudb_table.type_as_df()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Create the LLM context\n",
|
||||
"\n",
|
||||
"You can create an LLM Context using the Kinetica Workbench UI or you can manually create it with the `CREATE OR REPLACE CONTEXT` syntax. \n",
|
||||
"\n",
|
||||
"Here we create a context from the SQL syntax referencing the table we created."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'status': 'OK',\n",
|
||||
" 'message': '',\n",
|
||||
" 'data_type': 'execute_sql_response',\n",
|
||||
" 'response_time': 0.0148}"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# create an LLM context for the table.\n",
|
||||
"\n",
|
||||
"from gpudb import GPUdbException\n",
|
||||
"\n",
|
||||
"sql = f\"\"\"\n",
|
||||
"CREATE OR REPLACE CONTEXT {kinetica_ctx}\n",
|
||||
"(\n",
|
||||
" TABLE = demo.test_profiles\n",
|
||||
" COMMENT = 'Contains user profiles.'\n",
|
||||
"),\n",
|
||||
"(\n",
|
||||
" SAMPLES = (\n",
|
||||
" 'How many male users are there?' = \n",
|
||||
" 'select count(1) as num_users\n",
|
||||
" from demo.test_profiles\n",
|
||||
" where sex = ''M'';')\n",
|
||||
")\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def _check_error(response: dict) -> None:\n",
|
||||
" status = response[\"status_info\"][\"status\"]\n",
|
||||
" if status != \"OK\":\n",
|
||||
" message = response[\"status_info\"][\"message\"]\n",
|
||||
" raise GPUdbException(\"[%s]: %s\" % (status, message))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"response = kinetica_llm.kdbc.execute_sql(sql)\n",
|
||||
"_check_error(response)\n",
|
||||
"response[\"status_info\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use Langchain for inferencing\n",
|
||||
"\n",
|
||||
"In the example below we will create a chain from the previously created table and LLM context. This chain will generate SQL and return the resulting data as a dataframe.\n",
|
||||
"\n",
|
||||
"### Load the chat prompt from the Kinetica DB\n",
|
||||
"\n",
|
||||
"The `load_messages_from_context()` function will retrieve a context from the DB and convert it into a list of chat messages that we use to create a ``ChatPromptTemplate``."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"================================\u001b[1m System Message \u001b[0m================================\n",
|
||||
"\n",
|
||||
"CREATE TABLE demo.test_profiles AS\n",
|
||||
"(\n",
|
||||
" username VARCHAR (32) NOT NULL,\n",
|
||||
" name VARCHAR (32) NOT NULL,\n",
|
||||
" sex VARCHAR (1) NOT NULL,\n",
|
||||
" address VARCHAR (64) NOT NULL,\n",
|
||||
" mail VARCHAR (32) NOT NULL,\n",
|
||||
" birthdate TIMESTAMP NOT NULL\n",
|
||||
");\n",
|
||||
"COMMENT ON TABLE demo.test_profiles IS 'Contains user profiles.';\n",
|
||||
"\n",
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"How many male users are there?\n",
|
||||
"\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"select count(1) as num_users\n",
|
||||
" from demo.test_profiles\n",
|
||||
" where sex = 'M';\n",
|
||||
"\n",
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"\u001b[33;1m\u001b[1;3m{input}\u001b[0m\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"# load the context from the database\n",
|
||||
"ctx_messages = kinetica_llm.load_messages_from_context(kinetica_ctx)\n",
|
||||
"\n",
|
||||
"# Add the input prompt. This is where input question will be substituted.\n",
|
||||
"ctx_messages.append((\"human\", \"{input}\"))\n",
|
||||
"\n",
|
||||
"# Create the prompt template.\n",
|
||||
"prompt_template = ChatPromptTemplate.from_messages(ctx_messages)\n",
|
||||
"prompt_template.pretty_print()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Create the chain\n",
|
||||
"\n",
|
||||
"The last element of this chain is `KineticaSqlOutputParser` that will execute the SQL and return a dataframe. This is optional and if we left it out then only SQL would be returned."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.chat_models.kinetica import (\n",
|
||||
" KineticaSqlOutputParser,\n",
|
||||
" KineticaSqlResponse,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt_template | kinetica_llm | KineticaSqlOutputParser(kdbc=kinetica_llm.kdbc)"
|
||||
]
|
||||
},
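For comparison, a sketch of the same chain without the parser; it stops at the LLM, so invoking it returns a message whose content is the generated SQL rather than a dataframe:

```python
# Without KineticaSqlOutputParser the chain returns only the generated SQL.
sql_only_chain = prompt_template | kinetica_llm
sql_message = sql_only_chain.invoke({"input": "How many male users are there?"})
print(sql_message.content)
```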
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Generate the SQL\n",
|
||||
"\n",
|
||||
"The chain we created will take a question as input and return a ``KineticaSqlResponse`` containing the generated SQL and data. The question must be relevant to the to LLM context we used to create the prompt."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"SQL: SELECT username, name\n",
|
||||
" FROM demo.test_profiles\n",
|
||||
" WHERE sex = 'F'\n",
|
||||
" ORDER BY username;\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/html": [
|
||||
"<div>\n",
|
||||
"<style scoped>\n",
|
||||
" .dataframe tbody tr th:only-of-type {\n",
|
||||
" vertical-align: middle;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .dataframe tbody tr th {\n",
|
||||
" vertical-align: top;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .dataframe thead th {\n",
|
||||
" text-align: right;\n",
|
||||
" }\n",
|
||||
"</style>\n",
|
||||
"<table border=\"1\" class=\"dataframe\">\n",
|
||||
" <thead>\n",
|
||||
" <tr style=\"text-align: right;\">\n",
|
||||
" <th></th>\n",
|
||||
" <th>username</th>\n",
|
||||
" <th>name</th>\n",
|
||||
" </tr>\n",
|
||||
" </thead>\n",
|
||||
" <tbody>\n",
|
||||
" <tr>\n",
|
||||
" <th>0</th>\n",
|
||||
" <td>alexander40</td>\n",
|
||||
" <td>Tina Ramirez</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>1</th>\n",
|
||||
" <td>bburton</td>\n",
|
||||
" <td>Paula Kaiser</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>2</th>\n",
|
||||
" <td>brian12</td>\n",
|
||||
" <td>Stefanie Williams</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>3</th>\n",
|
||||
" <td>brownanna</td>\n",
|
||||
" <td>Jennifer Rowe</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>4</th>\n",
|
||||
" <td>carl19</td>\n",
|
||||
" <td>Amanda Potts</td>\n",
|
||||
" </tr>\n",
|
||||
" </tbody>\n",
|
||||
"</table>\n",
|
||||
"</div>"
|
||||
],
|
||||
"text/plain": [
|
||||
" username name\n",
|
||||
"0 alexander40 Tina Ramirez\n",
|
||||
"1 bburton Paula Kaiser\n",
|
||||
"2 brian12 Stefanie Williams\n",
|
||||
"3 brownanna Jennifer Rowe\n",
|
||||
"4 carl19 Amanda Potts"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Here you must ask a question relevant to the LLM context provided in the prompt template.\n",
|
||||
"response: KineticaSqlResponse = chain.invoke(\n",
|
||||
" {\"input\": \"What are the female users ordered by username?\"}\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(f\"SQL: {response.sql}\")\n",
|
||||
"response.dataframe.head()"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "langchain",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.18"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -40,8 +40,8 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import HumanMessage, SystemMessage\n",
|
||||
"from langchain_community.chat_models import ChatKonko"
|
||||
"from langchain_community.chat_models import ChatKonko\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -32,8 +32,8 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import HumanMessage\n",
|
||||
"from langchain_community.chat_models import ChatLiteLLM"
|
||||
"from langchain_community.chat_models import ChatLiteLLM\n",
|
||||
"from langchain_core.messages import HumanMessage"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -38,8 +38,8 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import HumanMessage\n",
|
||||
"from langchain_community.chat_models import ChatLiteLLMRouter\n",
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"from litellm import Router"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -54,7 +54,7 @@
|
||||
" HumanMessagePromptTemplate,\n",
|
||||
" MessagesPlaceholder,\n",
|
||||
")\n",
|
||||
"from langchain.schema import SystemMessage\n",
|
||||
"from langchain_core.messages import SystemMessage\n",
|
||||
"\n",
|
||||
"template_messages = [\n",
|
||||
" SystemMessage(content=\"You are a helpful assistant.\"),\n",
|
||||
|
||||
@@ -39,8 +39,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import HumanMessage\n",
|
||||
"from langchain_community.chat_models import MiniMaxChat"
|
||||
"from langchain_community.chat_models import MiniMaxChat\n",
|
||||
"from langchain_core.messages import HumanMessage"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -278,7 +278,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.schema import HumanMessage\n",
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" HumanMessage(\n",
|
||||
@@ -313,8 +313,8 @@
|
||||
"source": [
|
||||
"import json\n",
|
||||
"\n",
|
||||
"from langchain.schema import HumanMessage\n",
|
||||
"from langchain_community.chat_models import ChatOllama\n",
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
@@ -463,8 +463,8 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.schema import HumanMessage\n",
|
||||
"from langchain_community.chat_models import ChatOllama\n",
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"\n",
|
||||
"llm = ChatOllama(model=\"bakllava\", temperature=0)\n",
|
||||
"\n",
|
||||
|
||||
@@ -102,7 +102,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.schema import HumanMessage\n",
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"\n",
|
||||
"model.invoke(\"what is the weather in Boston?\")"
|
||||
]
|
||||
|
||||
@@ -34,7 +34,7 @@
|
||||
" HumanMessagePromptTemplate,\n",
|
||||
" SystemMessagePromptTemplate,\n",
|
||||
")\n",
|
||||
"from langchain.schema import HumanMessage, SystemMessage\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -62,8 +62,8 @@
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain.schema import HumanMessage\n",
|
||||
"from langchain_community.chat_models import PromptLayerChatOpenAI"
|
||||
"from langchain_community.chat_models import PromptLayerChatOpenAI\n",
|
||||
"from langchain_core.messages import HumanMessage"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -30,8 +30,8 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\"\"\"For basic init and call\"\"\"\n",
|
||||
"from langchain.chat_models import ChatSparkLLM\n",
|
||||
"from langchain.schema import HumanMessage\n",
|
||||
"from langchain_community.chat_models import ChatSparkLLM\n",
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"\n",
|
||||
"chat = ChatSparkLLM(\n",
|
||||
" spark_app_id=\"<app_id>\", spark_api_key=\"<api_key>\", spark_api_secret=\"<api_secret>\"\n",
|
||||
|
||||
@@ -36,8 +36,8 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import HumanMessage\n",
|
||||
"from langchain_community.chat_models import ChatHunyuan"
|
||||
"from langchain_community.chat_models import ChatHunyuan\n",
|
||||
"from langchain_core.messages import HumanMessage"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -100,8 +100,8 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.schema import HumanMessage\n",
|
||||
"from langchain_community.chat_models.tongyi import ChatTongyi\n",
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"\n",
|
||||
"chatLLM = ChatTongyi(\n",
|
||||
" streaming=True,\n",
|
||||
@@ -128,7 +128,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.schema import HumanMessage, SystemMessage\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" SystemMessage(\n",
|
||||
|
||||
@@ -36,7 +36,7 @@
|
||||
" HumanMessagePromptTemplate,\n",
|
||||
" SystemMessagePromptTemplate,\n",
|
||||
")\n",
|
||||
"from langchain.schema import HumanMessage, SystemMessage\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -48,8 +48,8 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import HumanMessage\n",
|
||||
"from langchain_community.chat_models import VolcEngineMaasChat"
|
||||
"from langchain_community.chat_models import VolcEngineMaasChat\n",
|
||||
"from langchain_core.messages import HumanMessage"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -58,8 +58,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import HumanMessage, SystemMessage\n",
|
||||
"from langchain_community.chat_models import ChatYandexGPT"
|
||||
"from langchain_community.chat_models import ChatYandexGPT\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
463
docs/docs/integrations/chat/yuan2.ipynb
Normal file
@@ -0,0 +1,463 @@
{
"cells": [
{
"cell_type": "raw",
"source": [
"---\n",
"sidebar_label: YUAN2\n",
"---"
],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%% raw\n"
}
}
},
{
"cell_type": "markdown",
"metadata": {
"pycharm": {
"name": "#%% md\n"
}
},
"source": [
"# YUAN2.0\n",
"\n",
"This notebook shows how to use the [YUAN2 API](https://github.com/IEIT-Yuan/Yuan-2.0/blob/main/docs/inference_server.md) in LangChain with `langchain.chat_models.ChatYuan2`.\n",
"\n",
"[*Yuan2.0*](https://github.com/IEIT-Yuan/Yuan-2.0/blob/main/README-EN.md) is a new-generation fundamental large language model developed by IEIT System. All three models, Yuan 2.0-102B, Yuan 2.0-51B, and Yuan 2.0-2B, have been published, along with scripts for pretraining, fine-tuning, and inference services for other developers. Yuan2.0 is based on Yuan1.0 and uses a wider range of high-quality pre-training data and instruction fine-tuning datasets to enhance the model's understanding of semantics, mathematics, reasoning, code, knowledge, and other aspects."
]
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": false,
"jupyter": {
"outputs_hidden": false
},
"pycharm": {
"name": "#%% md\n"
}
},
"source": [
"## Getting started\n",
"### Installation\n",
"Yuan2.0 provides an OpenAI-compatible API, and ChatYuan2 is integrated into LangChain as a chat model by using the OpenAI client.\n",
"Therefore, ensure the `openai` package is installed in your Python environment. Run the following command:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"%pip install --upgrade --quiet openai"
]
},
{
"cell_type": "markdown",
"metadata": {
"pycharm": {
"name": "#%% md\n"
}
},
"source": [
"### Importing the Required Modules\n",
"After installation, import the necessary modules to your Python script:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"pycharm": {
"is_executing": true,
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"from langchain_community.chat_models import ChatYuan2\n",
"from langchain_core.messages import AIMessage, HumanMessage, SystemMessage"
]
},
{
"cell_type": "markdown",
"metadata": {
"pycharm": {
"name": "#%% md\n"
}
},
"source": [
"### Setting Up Your API Server\n",
"Set up your OpenAI-compatible API server by following [yuan2 openai api server](https://github.com/IEIT-Yuan/Yuan-2.0/blob/main/README-EN.md).\n",
"If you deployed the API server locally, you can simply set `api_key=\"EMPTY\"` or anything you want.\n",
"Just make sure the `api_base` is set correctly."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"yuan2_api_key = \"your_api_key\"\n",
"yuan2_api_base = \"http://127.0.0.1:8001/v1\""
]
},
{
"cell_type": "markdown",
"metadata": {
"pycharm": {
"name": "#%% md\n"
}
},
"source": [
"### Initialize the ChatYuan2 Model\n",
"Here's how to initialize the chat model:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"pycharm": {
"is_executing": true,
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"chat = ChatYuan2(\n",
"    yuan2_api_base=\"http://127.0.0.1:8001/v1\",\n",
"    temperature=1.0,\n",
"    model_name=\"yuan2\",\n",
"    max_retries=3,\n",
"    streaming=False,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {
"pycharm": {
"name": "#%% md\n"
}
},
"source": [
"### Basic Usage\n",
"Invoke the model with system and human messages like this:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"pycharm": {
"name": "#%%\n"
},
"scrolled": true
},
"outputs": [],
"source": [
"messages = [\n",
"    SystemMessage(content=\"你是一个人工智能助手。\"),\n",
"    HumanMessage(content=\"你好,你是谁?\"),\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"pycharm": {
"is_executing": true,
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"print(chat(messages))"
]
},
{
"cell_type": "markdown",
"metadata": {
"pycharm": {
"name": "#%% md\n"
}
},
"source": [
"### Basic Usage with streaming\n",
"For continuous interaction, use the streaming feature:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false,
"jupyter": {
"outputs_hidden": false
},
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
"\n",
"chat = ChatYuan2(\n",
"    yuan2_api_base=\"http://127.0.0.1:8001/v1\",\n",
"    temperature=1.0,\n",
"    model_name=\"yuan2\",\n",
"    max_retries=3,\n",
"    streaming=True,\n",
"    callbacks=[StreamingStdOutCallbackHandler()],\n",
")\n",
"messages = [\n",
"    SystemMessage(content=\"你是个旅游小助手。\"),\n",
"    HumanMessage(content=\"给我介绍一下北京有哪些好玩的。\"),\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false,
"jupyter": {
"outputs_hidden": false
},
"pycharm": {
"is_executing": true,
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"chat(messages)"
]
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": false,
"jupyter": {
"outputs_hidden": false
},
"pycharm": {
"name": "#%% md\n"
}
},
"source": [
"## Advanced Features\n",
"### Usage with async calls\n",
"\n",
"Invoke the model with non-blocking calls, like this:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false,
"jupyter": {
"outputs_hidden": false
},
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"async def basic_agenerate():\n",
"    chat = ChatYuan2(\n",
"        yuan2_api_base=\"http://127.0.0.1:8001/v1\",\n",
"        temperature=1.0,\n",
"        model_name=\"yuan2\",\n",
"        max_retries=3,\n",
"    )\n",
"    messages = [\n",
"        [\n",
"            SystemMessage(content=\"你是个旅游小助手。\"),\n",
"            HumanMessage(content=\"给我介绍一下北京有哪些好玩的。\"),\n",
"        ]\n",
"    ]\n",
"\n",
"    result = await chat.agenerate(messages)\n",
"    print(result)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false,
"jupyter": {
"outputs_hidden": false
},
"pycharm": {
"is_executing": true,
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"import asyncio\n",
"\n",
"asyncio.run(basic_agenerate())"
]
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": false,
"jupyter": {
"outputs_hidden": false
},
"pycharm": {
"name": "#%% md\n"
}
},
"source": [
"### Usage with prompt template\n",
"\n",
"Invoke the model with non-blocking calls and a chat prompt template, like this:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"async def ainvoke_with_prompt_template():\n",
"    from langchain.prompts.chat import (\n",
"        ChatPromptTemplate,\n",
"    )\n",
"\n",
"    chat = ChatYuan2(\n",
"        yuan2_api_base=\"http://127.0.0.1:8001/v1\",\n",
"        temperature=1.0,\n",
"        model_name=\"yuan2\",\n",
"        max_retries=3,\n",
"    )\n",
"    prompt = ChatPromptTemplate.from_messages(\n",
"        [\n",
"            (\"system\", \"你是一个诗人,擅长写诗。\"),\n",
"            (\"human\", \"给我写首诗,主题是{theme}。\"),\n",
"        ]\n",
"    )\n",
"    chain = prompt | chat\n",
"    result = await chain.ainvoke({\"theme\": \"明月\"})\n",
"    print(f\"type(result): {type(result)}; {result}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"pycharm": {
"is_executing": true,
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"asyncio.run(ainvoke_with_prompt_template())"
]
},
{
"cell_type": "markdown",
"metadata": {
"pycharm": {
"name": "#%% md\n"
}
},
"source": [
"### Usage with async calls in streaming\n",
"For non-blocking calls with streaming output, use the astream method:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"async def basic_astream():\n",
"    chat = ChatYuan2(\n",
"        yuan2_api_base=\"http://127.0.0.1:8001/v1\",\n",
"        temperature=1.0,\n",
"        model_name=\"yuan2\",\n",
"        max_retries=3,\n",
"    )\n",
"    messages = [\n",
"        SystemMessage(content=\"你是个旅游小助手。\"),\n",
"        HumanMessage(content=\"给我介绍一下北京有哪些好玩的。\"),\n",
"    ]\n",
"    result = chat.astream(messages)\n",
"    async for chunk in result:\n",
"        print(chunk.content, end=\"\", flush=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"pycharm": {
"is_executing": true,
"name": "#%%\n"
},
"scrolled": true
},
"outputs": [],
"source": [
"import asyncio\n",
"\n",
"asyncio.run(basic_astream())"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.5"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
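The notebook above uses the older `chat(messages)` shorthand. A minimal equivalent through the standard Runnable interface (a sketch, assuming the same local API server configured above) is:

    from langchain_community.chat_models import ChatYuan2
    from langchain_core.messages import HumanMessage, SystemMessage

    chat = ChatYuan2(
        yuan2_api_base="http://127.0.0.1:8001/v1",
        temperature=1.0,
        model_name="yuan2",
        max_retries=3,
    )
    messages = [
        SystemMessage(content="你是一个人工智能助手。"),
        HumanMessage(content="你好,你是谁?"),
    ]
    # invoke() returns an AIMessage; its text is in .content
    print(chat.invoke(messages).content)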
@@ -79,8 +79,8 @@
"import re\n",
"from typing import Iterator, List\n",
"\n",
"from langchain.schema import BaseMessage, HumanMessage\n",
"from langchain_community.chat_loaders import base as chat_loaders\n",
"from langchain_core.messages import BaseMessage, HumanMessage\n",
"\n",
"logger = logging.getLogger()\n",
"\n",
@@ -55,7 +55,7 @@
"source": [
"## 1. Select a dataset\n",
"\n",
"This notebook fine-tunes a model directly on selecting which runs to fine-tune on. You will often curate these from traced runs. You can learn more about LangSmith datasets in the docs [docs](https://docs.smith.langchain.com/evaluation/datasets).\n",
"This notebook fine-tunes a model directly on selecting which runs to fine-tune on. You will often curate these from traced runs. You can learn more about LangSmith datasets in the [docs](https://docs.smith.langchain.com/evaluation/concepts#datasets).\n",
"\n",
"For the sake of this tutorial, we will upload an existing dataset here that you can use."
]
@@ -22,7 +22,7 @@
"import json\n",
"\n",
"from langchain.adapters.openai import convert_message_to_dict\n",
"from langchain.schema import AIMessage"
"from langchain_core.messages import AIMessage"
]
},
{
@@ -78,8 +78,8 @@
"import re\n",
"from typing import Iterator, List\n",
"\n",
"from langchain.schema import BaseMessage, HumanMessage\n",
"from langchain_community.chat_loaders import base as chat_loaders\n",
"from langchain_core.messages import BaseMessage, HumanMessage\n",
"\n",
"logger = logging.getLogger()\n",
"\n",
docs/docs/integrations/document_loaders/athena.ipynb (new file, 110 lines)
@@ -0,0 +1,110 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "MwTWzDxYgbrR"
},
"source": [
"# Athena\n",
"\n",
"This notebook goes over how to load documents from AWS Athena."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "F0zaLR3xgWmO"
},
"outputs": [],
"source": [
"! pip install boto3"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "076NLjfngoWJ"
},
"outputs": [],
"source": [
"from langchain_community.document_loaders.athena import AthenaLoader"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "XpMRQwU9gu44"
},
"outputs": [],
"source": [
"database_name = \"my_database\"\n",
"s3_output_path = \"s3://my_bucket/query_results/\"\n",
"query = \"SELECT * FROM my_table\"\n",
"profile_name = \"my_profile\"\n",
"\n",
"loader = AthenaLoader(\n",
"    query=query,\n",
"    database=database_name,\n",
"    s3_output_uri=s3_output_path,\n",
"    profile_name=profile_name,\n",
")\n",
"\n",
"documents = loader.load()\n",
"print(documents)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "5IBapL3ejoEt"
},
"source": [
"Example with metadata columns"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "wMx6nI1qjryD"
},
"outputs": [],
"source": [
"database_name = \"my_database\"\n",
"s3_output_path = \"s3://my_bucket/query_results/\"\n",
"query = \"SELECT * FROM my_table\"\n",
"profile_name = \"my_profile\"\n",
"metadata_columns = [\"_row\", \"_created_at\"]\n",
"\n",
"loader = AthenaLoader(\n",
"    query=query,\n",
"    database=database_name,\n",
"    s3_output_uri=s3_output_path,\n",
"    profile_name=profile_name,\n",
"    metadata_columns=metadata_columns,\n",
")\n",
"\n",
"documents = loader.load()\n",
"print(documents)"
]
}
],
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
@@ -72,57 +72,72 @@
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": false,
"jupyter": {
"outputs_hidden": false
}
},
"source": [
"### Init from a cassandra driver Session\n",
"\n",
"You need to create a `cassandra.cluster.Session` object, as described in the [Cassandra driver documentation](https://docs.datastax.com/en/developer/python-driver/latest/api/cassandra/cluster/#module-cassandra.cluster). The details vary (e.g. with network settings and authentication), but this might be something like:"
],
"metadata": {
"collapsed": false
}
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false,
"jupyter": {
"outputs_hidden": false
}
},
"outputs": [],
"source": [
"from cassandra.cluster import Cluster\n",
"\n",
"cluster = Cluster()\n",
"session = cluster.connect()"
],
"metadata": {
"collapsed": false
},
"execution_count": null
]
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": false,
"jupyter": {
"outputs_hidden": false
}
},
"source": [
"You need to provide the name of an existing keyspace of the Cassandra instance:"
],
"metadata": {
"collapsed": false
}
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false,
"jupyter": {
"outputs_hidden": false
}
},
"outputs": [],
"source": [
"CASSANDRA_KEYSPACE = input(\"CASSANDRA_KEYSPACE = \")"
],
"metadata": {
"collapsed": false
},
"execution_count": null
]
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": false,
"jupyter": {
"outputs_hidden": false
}
},
"source": [
"Creating the document loader:"
],
"metadata": {
"collapsed": false
}
]
},
{
"cell_type": "code",
@@ -144,18 +159,21 @@
},
{
"cell_type": "code",
"outputs": [],
"source": [
"docs = loader.load()"
],
"execution_count": 17,
"metadata": {
"collapsed": false,
"ExecuteTime": {
"end_time": "2024-01-19T15:47:26.399472Z",
"start_time": "2024-01-19T15:47:26.389145Z"
},
"collapsed": false,
"jupyter": {
"outputs_hidden": false
}
},
"execution_count": 17
"outputs": [],
"source": [
"docs = loader.load()"
]
},
{
"cell_type": "code",
@@ -169,7 +187,9 @@
"outputs": [
{
"data": {
"text/plain": "Document(page_content='Row(_id=\\'659bdffa16cbc4586b11a423\\', title=\\'Dangerous Men\\', reviewtext=\\'\"Dangerous Men,\" the picture\\\\\\'s production notes inform, took 26 years to reach the big screen. After having seen it, I wonder: What was the rush?\\')', metadata={'table': 'movie_reviews', 'keyspace': 'default_keyspace'})"
"text/plain": [
"Document(page_content='Row(_id=\\'659bdffa16cbc4586b11a423\\', title=\\'Dangerous Men\\', reviewtext=\\'\"Dangerous Men,\" the picture\\\\\\'s production notes inform, took 26 years to reach the big screen. After having seen it, I wonder: What was the rush?\\')', metadata={'table': 'movie_reviews', 'keyspace': 'default_keyspace'})"
]
},
"execution_count": 19,
"metadata": {},
@@ -182,17 +202,27 @@
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": false,
"jupyter": {
"outputs_hidden": false
}
},
"source": [
"### Init from cassio\n",
"\n",
"It's also possible to use cassio to configure the session and keyspace."
],
"metadata": {
"collapsed": false
}
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false,
"jupyter": {
"outputs_hidden": false
}
},
"outputs": [],
"source": [
"import cassio\n",
@@ -204,11 +234,16 @@
")\n",
"\n",
"docs = loader.load()"
],
"metadata": {
"collapsed": false
},
"execution_count": null
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Attribution statement\n",
"\n",
"> Apache Cassandra, Cassandra and Apache are either registered trademarks or trademarks of the [Apache Software Foundation](http://www.apache.org/) in the United States and/or other countries."
]
}
],
"metadata": {
@@ -233,7 +268,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.18"
"version": "3.9.17"
}
},
"nbformat": 4,
docs/docs/integrations/document_loaders/pebblo.ipynb (new file, 88 lines)
@@ -0,0 +1,88 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Pebblo Safe DocumentLoader\n",
"\n",
"> [Pebblo](https://github.com/daxa-ai/pebblo) enables developers to safely load data and promote their Gen AI app to deployment without worrying about the organization’s compliance and security requirements. The project identifies semantic topics and entities found in the loaded data and summarizes them on the UI or a PDF report.\n",
"\n",
"Pebblo has two components.\n",
"\n",
"1. Pebblo Safe DocumentLoader for Langchain\n",
"1. Pebblo Daemon\n",
"\n",
"This document describes how to augment your existing Langchain DocumentLoader with Pebblo Safe DocumentLoader to get deep data visibility on the types of Topics and Entities ingested into the Gen-AI Langchain application. For details on `Pebblo Daemon`, see this [pebblo daemon](https://daxa-ai.github.io/pebblo-docs/daemon.html) document.\n",
"\n",
"Pebblo SafeLoader enables safe data ingestion for Langchain `DocumentLoader`. This is done by wrapping the document loader call with `Pebblo Safe DocumentLoader`.\n",
"\n",
"#### How to Pebblo-enable Document Loading?\n",
"\n",
"Assume a Langchain RAG application snippet using `CSVLoader` to read a CSV document for inference.\n",
"\n",
"Here is the snippet of Document loading using `CSVLoader`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders.csv_loader import CSVLoader\n",
"\n",
"loader = CSVLoader(\"data/corp_sens_data.csv\")\n",
"documents = loader.load()\n",
"print(documents)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The Pebblo SafeLoader can be enabled with a few lines of code changes to the above snippet."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders.csv_loader import CSVLoader\n",
"from langchain_community.document_loaders import PebbloSafeLoader\n",
"\n",
"loader = PebbloSafeLoader(\n",
"    CSVLoader(\"data/corp_sens_data.csv\"),\n",
"    name=\"acme-corp-rag-1\",  # App name (Mandatory)\n",
"    owner=\"Joe Smith\",  # Owner (Optional)\n",
"    description=\"Support productivity RAG application\",  # Description (Optional)\n",
")\n",
"documents = loader.load()\n",
"print(documents)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.13"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -13,27 +13,16 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Requirement already satisfied: nest_asyncio in /Users/tasp/Code/projects/langchain/.venv/lib/python3.10/site-packages (1.5.6)\n",
"\n",
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip available: \u001b[0m\u001b[31;49m22.3.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m23.0.1\u001b[0m\n",
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n"
]
}
],
"outputs": [],
"source": [
"%pip install --upgrade --quiet nest_asyncio"
]
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
@@ -54,11 +43,11 @@
},
{
"cell_type": "code",
"execution_count": 21,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"sitemap_loader = SitemapLoader(web_path=\"https://langchain.readthedocs.io/sitemap.xml\")\n",
"sitemap_loader = SitemapLoader(web_path=\"https://api.python.langchain.com/sitemap.xml\")\n",
"\n",
"docs = sitemap_loader.load()"
]
@@ -90,7 +79,7 @@
{
"data": {
"text/plain": [
"Document(page_content='\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nLangChain Python API Reference Documentation.\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nYou will be automatically redirected to the new location of this page.\\n\\n', metadata={'source': 'https://api.python.langchain.com/en/stable/', 'loc': 'https://api.python.langchain.com/en/stable/', 'lastmod': '2023-10-13T18:13:26.966937+00:00', 'changefreq': 'weekly', 'priority': '1'})"
"Document(page_content='\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nLangChain Python API Reference Documentation.\\n\\n\\nYou will be automatically redirected to the new location of this page.\\n\\n', metadata={'source': 'https://api.python.langchain.com/en/stable/', 'loc': 'https://api.python.langchain.com/en/stable/', 'lastmod': '2024-02-09T01:10:49.422114+00:00', 'changefreq': 'weekly', 'priority': '1'})"
]
},
"execution_count": 6,
@@ -113,20 +102,12 @@
},
{
"cell_type": "code",
"execution_count": 27,
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Fetching pages: 100%|##########| 1/1 [00:00<00:00, 16.39it/s]\n"
]
}
],
"outputs": [],
"source": [
"loader = SitemapLoader(\n",
"    web_path=\"https://langchain.readthedocs.io/sitemap.xml\",\n",
"    web_path=\"https://api.python.langchain.com/sitemap.xml\",\n",
"    filter_urls=[\"https://api.python.langchain.com/en/latest\"],\n",
")\n",
"documents = loader.load()"
@@ -134,7 +115,7 @@
},
{
"cell_type": "code",
"execution_count": 28,
"execution_count": 8,
"metadata": {
"scrolled": true
},
@@ -142,10 +123,10 @@
{
"data": {
"text/plain": [
"Document(page_content='\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nLangChain Python API Reference Documentation.\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nYou will be automatically redirected to the new location of this page.\\n\\n', metadata={'source': 'https://api.python.langchain.com/en/latest/', 'loc': 'https://api.python.langchain.com/en/latest/', 'lastmod': '2023-10-13T18:09:58.478681+00:00', 'changefreq': 'daily', 'priority': '0.9'})"
"Document(page_content='\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nLangChain Python API Reference Documentation.\\n\\n\\nYou will be automatically redirected to the new location of this page.\\n\\n', metadata={'source': 'https://api.python.langchain.com/en/latest/', 'loc': 'https://api.python.langchain.com/en/latest/', 'lastmod': '2024-02-12T05:26:10.971077+00:00', 'changefreq': 'daily', 'priority': '0.9'})"
]
},
"execution_count": 28,
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
@@ -183,7 +164,7 @@
},
{
"cell_type": "code",
"execution_count": 30,
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
@@ -211,12 +192,12 @@
},
{
"cell_type": "code",
"execution_count": 31,
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"loader = SitemapLoader(\n",
"    \"https://langchain.readthedocs.io/sitemap.xml\",\n",
"    \"https://api.python.langchain.com/sitemap.xml\",\n",
"    filter_urls=[\"https://api.python.langchain.com/en/latest/\"],\n",
"    parsing_function=remove_nav_and_header_elements,\n",
")"
@@ -233,17 +214,9 @@
},
{
"cell_type": "code",
"execution_count": 32,
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Fetching pages: 100%|##########| 3/3 [00:00<00:00, 12.46it/s]\n"
]
}
],
"outputs": [],
"source": [
"sitemap_loader = SitemapLoader(web_path=\"example_data/sitemap.xml\", is_local=True)\n",
"\n",
@@ -9,7 +9,35 @@
"\n",
"This notebook covers how to load source code files using a special approach with language parsing: each top-level function and class in the code is loaded into separate documents. Any remaining top-level code outside the already loaded functions and classes will be loaded into a separate document.\n",
"\n",
"This approach can potentially improve the accuracy of QA models over source code. Currently, the supported languages for code parsing are Python and JavaScript. The language used for parsing can be configured, along with the minimum number of lines required to activate the splitting based on syntax."
"This approach can potentially improve the accuracy of QA models over source code.\n",
"\n",
"The supported languages for code parsing are:\n",
"\n",
"- C (*)\n",
"- C++ (*)\n",
"- C# (*)\n",
"- COBOL\n",
"- Go (*)\n",
"- Java (*)\n",
"- JavaScript (requires package `esprima`)\n",
"- Kotlin (*)\n",
"- Lua (*)\n",
"- Perl (*)\n",
"- Python\n",
"- Ruby (*)\n",
"- Rust (*)\n",
"- Scala (*)\n",
"- TypeScript (*)\n",
"\n",
"Items marked with (*) require the packages `tree_sitter` and `tree_sitter_languages`.\n",
"It is straightforward to add support for additional languages using `tree_sitter`,\n",
"although this currently requires modifying LangChain.\n",
"\n",
"The language used for parsing can be configured, along with the minimum number of\n",
"lines required to activate the splitting based on syntax.\n",
"\n",
"If a language is not explicitly specified, `LanguageParser` will infer one from\n",
"filename extensions, if present."
]
},
{
@@ -19,7 +47,7 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet esprima"
"%pip install -qU esprima tree_sitter tree_sitter_languages"
]
},
{
@@ -395,6 +423,33 @@
"source": [
"print(\"\\n\\n--8<--\\n\\n\".join([document.page_content for document in result]))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Adding Languages using Tree-sitter Template\n",
"\n",
"Expanding language support using the Tree-Sitter template involves a few essential steps:\n",
"\n",
"1. **Creating a New Language File**:\n",
"    - Begin by creating a new file in the designated directory (langchain/libs/community/langchain_community/document_loaders/parsers/language).\n",
"    - Model this file based on the structure and parsing logic of existing language files like **`cpp.py`**.\n",
"    - You will also need to create a file in the langchain directory (langchain/libs/langchain/langchain/document_loaders/parsers/language).\n",
"2. **Parsing Language Specifics**:\n",
"    - Mimic the structure used in the **`cpp.py`** file, adapting it to suit the language you are incorporating.\n",
"    - The primary alteration involves adjusting the chunk query array to suit the syntax and structure of the language you are parsing.\n",
"3. **Testing the Language Parser**:\n",
"    - For thorough validation, generate a test file specific to the new language. Create **`test_language.py`** in the designated directory (langchain/libs/community/tests/unit_tests/document_loaders/parsers/language).\n",
"    - Follow the example set by **`test_cpp.py`** to establish fundamental tests for the parsed elements in the new language.\n",
"4. **Integration into the Parser and Text Splitter**:\n",
"    - Incorporate your new language within the **`language_parser.py`** file. Be sure to update LANGUAGE_EXTENSIONS and LANGUAGE_SEGMENTERS, along with the docstring for LanguageParser, to recognize and handle the added language.\n",
"    - Also, confirm that your language is included in **`text_splitter.py`** in class Language for proper parsing.\n",
"\n",
"By following these steps and ensuring comprehensive testing and integration, you'll successfully extend language support using the Tree-Sitter template.\n",
"\n",
"Best of luck!"
]
}
],
"metadata": {
@@ -413,7 +468,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.16"
"version": "3.11.5"
}
},
"nbformat": 4,
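The "Adding Languages" steps above describe files without showing any code. As a rough sketch of the new language file from steps 1-2 (modeled on the cpp.py pattern of subclassing TreeSitterSegmenter; the class name, chunk-query node types, and import path here are assumptions for illustration, not repository code):

    # Hypothetical new language file modeled on cpp.py; names are assumptions.
    from tree_sitter import Language

    from langchain_community.document_loaders.parsers.language.tree_sitter_segmenter import (
        TreeSitterSegmenter,
    )

    # Tree-sitter query listing the node types to split out as documents.
    CHUNK_QUERY = """
        [
            (function_declaration) @function
            (type_declaration) @type
        ]
    """.strip()


    class GoSegmenter(TreeSitterSegmenter):
        """Code segmenter for Go, following the cpp.py structure."""

        def get_language(self) -> Language:
            from tree_sitter_languages import get_language

            return get_language("go")

        def get_chunk_query(self) -> str:
            return CHUNK_QUERY

        def make_line_comment(self, text: str) -> str:
            return f"// {text}"

The chunk query (step 2) is the main per-language change: it enumerates the tree-sitter node types that should become standalone documents.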
@@ -198,8 +198,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema import Document\n",
"from langchain_community.document_loaders import TensorflowDatasetLoader\n",
"from langchain_core.documents import Document\n",
"\n",
"loader = TensorflowDatasetLoader(\n",
"    dataset_name=\"mlqa/en\",\n",
docs/docs/integrations/document_loaders/tidb.ipynb (new file, 189 lines)
@@ -0,0 +1,189 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# TiDB\n",
"\n",
"> [TiDB](https://github.com/pingcap/tidb) is an open-source, cloud-native, distributed, MySQL-Compatible database for elastic scale and real-time analytics.\n",
"\n",
"This notebook introduces how to use `TiDBLoader` to load data from TiDB in LangChain."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Prerequisites\n",
"\n",
"Before using the `TiDBLoader`, we will install the following dependencies:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Then we will configure the connection to a TiDB cluster. In this notebook, we will follow the standard connection method provided by TiDB Cloud to establish a secure and efficient database connection."
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"\n",
"# Copied from the TiDB Cloud console; replace it with your own\n",
"tidb_connection_string_template = \"mysql+pymysql://<USER>:<PASSWORD>@<HOST>:4000/<DB>?ssl_ca=/etc/ssl/cert.pem&ssl_verify_cert=true&ssl_verify_identity=true\"\n",
"tidb_password = getpass.getpass(\"Input your TiDB password:\")\n",
"tidb_connection_string = tidb_connection_string_template.replace(\n",
"    \"<PASSWORD>\", tidb_password\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Load Data from TiDB\n",
"\n",
"Here's a breakdown of some key arguments you can use to customize the behavior of the `TiDBLoader`:\n",
"\n",
"- `query` (str): This is the SQL query to be executed against the TiDB database. The query should select the data you want to load into your `Document` objects. \n",
"    For instance, you might use a query like `\"SELECT * FROM my_table\"` to fetch all data from `my_table`.\n",
"\n",
"- `page_content_columns` (Optional[List[str]]): Specifies the list of column names whose values should be included in the `page_content` of each `Document` object. \n",
"    If set to `None` (the default), all columns returned by the query are included in `page_content`. This allows you to tailor the content of each document based on specific columns of your data.\n",
"\n",
"- `metadata_columns` (Optional[List[str]]): Specifies the list of column names whose values should be included in the `metadata` of each `Document` object. \n",
"    By default, this list is empty, meaning no metadata will be included unless explicitly specified. This is useful for including additional information about each document that doesn't form part of the main content but is still valuable for processing or analysis."
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine\n",
"\n",
"# Connect to the database\n",
"engine = create_engine(tidb_connection_string)\n",
"metadata = MetaData()\n",
"table_name = \"test_tidb_loader\"\n",
"\n",
"# Create a table\n",
"test_table = Table(\n",
"    table_name,\n",
"    metadata,\n",
"    Column(\"id\", Integer, primary_key=True),\n",
"    Column(\"name\", String(255)),\n",
"    Column(\"description\", String(255)),\n",
")\n",
"metadata.create_all(engine)\n",
"\n",
"\n",
"with engine.connect() as connection:\n",
"    transaction = connection.begin()\n",
"    try:\n",
"        connection.execute(\n",
"            test_table.insert(),\n",
"            [\n",
"                {\"name\": \"Item 1\", \"description\": \"Description of Item 1\"},\n",
"                {\"name\": \"Item 2\", \"description\": \"Description of Item 2\"},\n",
"                {\"name\": \"Item 3\", \"description\": \"Description of Item 3\"},\n",
"            ],\n",
"        )\n",
"        transaction.commit()\n",
"    except:\n",
"        transaction.rollback()\n",
"        raise"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"------------------------------\n",
"content: name: Item 1\n",
"description: Description of Item 1\n",
"metadata: {'id': 1}\n",
"------------------------------\n",
"content: name: Item 2\n",
"description: Description of Item 2\n",
"metadata: {'id': 2}\n",
"------------------------------\n",
"content: name: Item 3\n",
"description: Description of Item 3\n",
"metadata: {'id': 3}\n"
]
}
],
"source": [
"from langchain_community.document_loaders import TiDBLoader\n",
"\n",
"# Set up TiDBLoader to retrieve data\n",
"loader = TiDBLoader(\n",
"    connection_string=tidb_connection_string,\n",
"    query=f\"SELECT * FROM {table_name};\",\n",
"    page_content_columns=[\"name\", \"description\"],\n",
"    metadata_columns=[\"id\"],\n",
")\n",
"\n",
"# Load data\n",
"documents = loader.load()\n",
"\n",
"# Display the loaded documents\n",
"for doc in documents:\n",
"    print(\"-\" * 30)\n",
"    print(f\"content: {doc.page_content}\\nmetadata: {doc.metadata}\")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"test_table.drop(bind=engine)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "langchain",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.13"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -32,8 +32,8 @@
"source": [
"import json\n",
"\n",
"from langchain.schema import Document\n",
"from langchain_community.document_transformers import DoctranPropertyExtractor"
"from langchain_community.document_transformers import DoctranPropertyExtractor\n",
"from langchain_core.documents import Document"
]
},
{
@@ -30,8 +30,8 @@
"source": [
"import json\n",
"\n",
"from langchain.schema import Document\n",
"from langchain_community.document_transformers import DoctranQATransformer"
"from langchain_community.document_transformers import DoctranQATransformer\n",
"from langchain_core.documents import Document"
]
},
{
@@ -28,8 +28,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema import Document\n",
"from langchain_community.document_transformers import DoctranTextTranslator"
"from langchain_community.document_transformers import DoctranTextTranslator\n",
"from langchain_core.documents import Document"
]
},
{
@@ -31,8 +31,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema import Document\n",
"from langchain_community.document_transformers import GoogleTranslateTransformer"
"from langchain_community.document_transformers import GoogleTranslateTransformer\n",
"from langchain_core.documents import Document"
]
},
{
Some files were not shown because too many files have changed in this diff.