mirror of
https://github.com/hwchase17/langchain.git
synced 2026-02-06 09:10:27 +00:00
Compare commits
2 Commits
renderer
...
wfh/add_hu
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
46ce880ebc | ||
|
|
9791a51d40 |
@@ -10,7 +10,7 @@ You can use the dev container configuration in this folder to build and run the
|
||||
You may use the button above, or follow these steps to open this repo in a Codespace:
|
||||
1. Click the **Code** drop-down menu at the top of https://github.com/langchain-ai/langchain.
|
||||
1. Click on the **Codespaces** tab.
|
||||
1. Click **Create codespace on master**.
|
||||
1. Click **Create codespace on master** .
|
||||
|
||||
For more info, check out the [GitHub documentation](https://docs.github.com/en/free-pro-team@latest/github/developing-online-with-codespaces/creating-a-codespace#creating-a-codespace).
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
|
||||
// The optional 'workspaceFolder' property is the path VS Code should open by default when
|
||||
// connected. This is typically a file mount in .devcontainer/docker-compose.yml
|
||||
"workspaceFolder": "/workspaces/langchain",
|
||||
"workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",
|
||||
|
||||
// Prevent the container from shutting down
|
||||
"overrideCommand": true
|
||||
|
||||
@@ -6,7 +6,7 @@ services:
|
||||
context: ..
|
||||
volumes:
|
||||
# Update this to wherever you want VS Code to mount the folder of your project
|
||||
- ..:/workspaces/langchain:cached
|
||||
- ..:/workspaces:cached
|
||||
networks:
|
||||
- langchain-network
|
||||
# environment:
|
||||
|
||||
9
.github/ISSUE_TEMPLATE/documentation.yml
vendored
9
.github/ISSUE_TEMPLATE/documentation.yml
vendored
@@ -26,13 +26,6 @@ body:
|
||||
[LangChain Github Discussions](https://github.com/langchain-ai/langchain/discussions),
|
||||
[LangChain Github Issues](https://github.com/langchain-ai/langchain/issues?q=is%3Aissue),
|
||||
[LangChain ChatBot](https://chat.langchain.com/)
|
||||
- type: input
|
||||
id: url
|
||||
attributes:
|
||||
label: URL
|
||||
description: URL to documentation
|
||||
validations:
|
||||
required: false
|
||||
- type: checkboxes
|
||||
id: checks
|
||||
attributes:
|
||||
@@ -55,4 +48,4 @@ body:
|
||||
label: "Idea or request for content:"
|
||||
description: >
|
||||
Please describe as clearly as possible what topics you think are missing
|
||||
from the current documentation.
|
||||
from the current documentation.
|
||||
2
.github/PULL_REQUEST_TEMPLATE.md
vendored
2
.github/PULL_REQUEST_TEMPLATE.md
vendored
@@ -26,4 +26,4 @@ Additional guidelines:
|
||||
- Changes should be backwards compatible.
|
||||
- If you are adding something to community, do not re-import it in langchain.
|
||||
|
||||
If no one reviews your PR within a few days, please @-mention one of baskaryan, efriis, eyurtsev, ccurme, vbarda, hwchase17.
|
||||
If no one reviews your PR within a few days, please @-mention one of baskaryan, efriis, eyurtsev, hwchase17.
|
||||
|
||||
4
.github/actions/people/app/main.py
vendored
4
.github/actions/people/app/main.py
vendored
@@ -537,9 +537,7 @@ if __name__ == "__main__":
|
||||
"nfcampos",
|
||||
"efriis",
|
||||
"eyurtsev",
|
||||
"rlancemartin",
|
||||
"ccurme",
|
||||
"vbarda",
|
||||
"rlancemartin"
|
||||
}
|
||||
hidden_logins = {
|
||||
"dev2049",
|
||||
|
||||
75
.github/scripts/check_diff.py
vendored
75
.github/scripts/check_diff.py
vendored
@@ -1,52 +1,16 @@
|
||||
import json
|
||||
import sys
|
||||
import os
|
||||
from typing import Dict, List, Set
|
||||
|
||||
import tomllib
|
||||
from collections import defaultdict
|
||||
import glob
|
||||
from typing import Dict
|
||||
|
||||
LANGCHAIN_DIRS = [
|
||||
"libs/core",
|
||||
"libs/text-splitters",
|
||||
"libs/langchain",
|
||||
"libs/community",
|
||||
"libs/langchain",
|
||||
"libs/experimental",
|
||||
]
|
||||
|
||||
def all_package_dirs() -> Set[str]:
|
||||
return {"/".join(path.split("/")[:-1]) for path in glob.glob("./libs/**/pyproject.toml", recursive=True)}
|
||||
|
||||
|
||||
def dependents_graph() -> dict:
|
||||
dependents = defaultdict(set)
|
||||
|
||||
for path in glob.glob("./libs/**/pyproject.toml", recursive=True):
|
||||
if "template" in path:
|
||||
continue
|
||||
with open(path, "rb") as f:
|
||||
pyproject = tomllib.load(f)['tool']['poetry']
|
||||
pkg_dir = "libs" + "/".join(path.split("libs")[1].split("/")[:-1])
|
||||
for dep in pyproject['dependencies']:
|
||||
if "langchain" in dep:
|
||||
dependents[dep].add(pkg_dir)
|
||||
return dependents
|
||||
|
||||
|
||||
def add_dependents(dirs_to_eval: Set[str], dependents: dict) -> List[str]:
|
||||
updated = set()
|
||||
for dir_ in dirs_to_eval:
|
||||
# handle core manually because it has so many dependents
|
||||
if "core" in dir_:
|
||||
updated.add(dir_)
|
||||
continue
|
||||
pkg = "langchain-" + dir_.split("/")[-1]
|
||||
updated.update(dependents[pkg])
|
||||
updated.add(dir_)
|
||||
return list(updated)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
files = sys.argv[1:]
|
||||
|
||||
@@ -55,13 +19,11 @@ if __name__ == "__main__":
|
||||
"test": set(),
|
||||
"extended-test": set(),
|
||||
}
|
||||
docs_edited = False
|
||||
|
||||
if len(files) >= 300:
|
||||
if len(files) == 300:
|
||||
# max diff length is 300 files - there are likely files missing
|
||||
dirs_to_run["lint"] = all_package_dirs()
|
||||
dirs_to_run["test"] = all_package_dirs()
|
||||
dirs_to_run["extended-test"] = set(LANGCHAIN_DIRS)
|
||||
raise ValueError("Max diff reached. Please manually run CI on changed libs.")
|
||||
|
||||
for file in files:
|
||||
if any(
|
||||
file.startswith(dir_)
|
||||
@@ -85,20 +47,6 @@ if __name__ == "__main__":
|
||||
found = True
|
||||
if found:
|
||||
dirs_to_run["extended-test"].add(dir_)
|
||||
elif file.startswith("libs/standard-tests"):
|
||||
# TODO: update to include all packages that rely on standard-tests (all partner packages)
|
||||
# note: won't run on external repo partners
|
||||
dirs_to_run["lint"].add("libs/standard-tests")
|
||||
dirs_to_run["test"].add("libs/partners/mistralai")
|
||||
dirs_to_run["test"].add("libs/partners/openai")
|
||||
dirs_to_run["test"].add("libs/partners/anthropic")
|
||||
dirs_to_run["test"].add("libs/partners/ai21")
|
||||
dirs_to_run["test"].add("libs/partners/fireworks")
|
||||
dirs_to_run["test"].add("libs/partners/groq")
|
||||
|
||||
elif file.startswith("libs/cli"):
|
||||
# todo: add cli makefile
|
||||
pass
|
||||
elif file.startswith("libs/partners"):
|
||||
partner_dir = file.split("/")[2]
|
||||
if os.path.isdir(f"libs/partners/{partner_dir}") and [
|
||||
@@ -114,20 +62,15 @@ if __name__ == "__main__":
|
||||
"an update for this new library!"
|
||||
)
|
||||
elif any(file.startswith(p) for p in ["docs/", "templates/", "cookbook/"]):
|
||||
if file.startswith("docs/"):
|
||||
docs_edited = True
|
||||
dirs_to_run["lint"].add(".")
|
||||
|
||||
dependents = dependents_graph()
|
||||
|
||||
outputs = {
|
||||
"dirs-to-lint": add_dependents(
|
||||
dirs_to_run["lint"] | dirs_to_run["test"] | dirs_to_run["extended-test"], dependents
|
||||
"dirs-to-lint": list(
|
||||
dirs_to_run["lint"] | dirs_to_run["test"] | dirs_to_run["extended-test"]
|
||||
),
|
||||
"dirs-to-test": add_dependents(dirs_to_run["test"] | dirs_to_run["extended-test"], dependents),
|
||||
"dirs-to-test": list(dirs_to_run["test"] | dirs_to_run["extended-test"]),
|
||||
"dirs-to-extended-test": list(dirs_to_run["extended-test"]),
|
||||
"docs-edited": "true" if docs_edited else "",
|
||||
}
|
||||
for key, value in outputs.items():
|
||||
json_output = json.dumps(value)
|
||||
print(f"{key}={json_output}")
|
||||
print(f"{key}={json_output}") # noqa: T201
|
||||
|
||||
34
.github/scripts/get_min_versions.py
vendored
34
.github/scripts/get_min_versions.py
vendored
@@ -4,25 +4,17 @@ import tomllib
|
||||
from packaging.version import parse as parse_version
|
||||
import re
|
||||
|
||||
MIN_VERSION_LIBS = [
|
||||
"langchain-core",
|
||||
"langchain-community",
|
||||
"langchain",
|
||||
"langchain-text-splitters",
|
||||
]
|
||||
MIN_VERSION_LIBS = ["langchain-core", "langchain-community", "langchain", "langchain-text-splitters"]
|
||||
|
||||
|
||||
def get_min_version(version: str) -> str:
|
||||
# base regex for x.x.x with cases for rc/post/etc
|
||||
# valid strings: https://peps.python.org/pep-0440/#public-version-identifiers
|
||||
vstring = r"\d+(?:\.\d+){0,2}(?:(?:a|b|rc|\.post|\.dev)\d+)?"
|
||||
# case ^x.x.x
|
||||
_match = re.match(f"^\\^({vstring})$", version)
|
||||
_match = re.match(r"^\^(\d+(?:\.\d+){0,2})$", version)
|
||||
if _match:
|
||||
return _match.group(1)
|
||||
|
||||
# case >=x.x.x,<y.y.y
|
||||
_match = re.match(f"^>=({vstring}),<({vstring})$", version)
|
||||
_match = re.match(r"^>=(\d+(?:\.\d+){0,2}),<(\d+(?:\.\d+){0,2})$", version)
|
||||
if _match:
|
||||
_min = _match.group(1)
|
||||
_max = _match.group(2)
|
||||
@@ -30,7 +22,7 @@ def get_min_version(version: str) -> str:
|
||||
return _min
|
||||
|
||||
# case x.x.x
|
||||
_match = re.match(f"^({vstring})$", version)
|
||||
_match = re.match(r"^(\d+(?:\.\d+){0,2})$", version)
|
||||
if _match:
|
||||
return _match.group(1)
|
||||
|
||||
@@ -55,9 +47,6 @@ def get_min_version_from_toml(toml_path: str):
|
||||
# Get the version string
|
||||
version_string = dependencies[lib]
|
||||
|
||||
if isinstance(version_string, dict):
|
||||
version_string = version_string["version"]
|
||||
|
||||
# Use parse_version to get the minimum supported version from version_string
|
||||
min_version = get_min_version(version_string)
|
||||
|
||||
@@ -67,13 +56,12 @@ def get_min_version_from_toml(toml_path: str):
|
||||
return min_versions
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# Get the TOML file path from the command line argument
|
||||
toml_file = sys.argv[1]
|
||||
# Get the TOML file path from the command line argument
|
||||
toml_file = sys.argv[1]
|
||||
|
||||
# Call the function to get the minimum versions
|
||||
min_versions = get_min_version_from_toml(toml_file)
|
||||
# Call the function to get the minimum versions
|
||||
min_versions = get_min_version_from_toml(toml_file)
|
||||
|
||||
print(
|
||||
" ".join([f"{lib}=={version}" for lib, version in min_versions.items()])
|
||||
)
|
||||
print(
|
||||
" ".join([f"{lib}=={version}" for lib, version in min_versions.items()])
|
||||
) # noqa: T201
|
||||
|
||||
7
.github/workflows/.codespell-exclude
vendored
7
.github/workflows/.codespell-exclude
vendored
@@ -1,7 +0,0 @@
|
||||
libs/community/langchain_community/llms/yuan2.py
|
||||
"NotIn": "not in",
|
||||
- `/checkin`: Check-in
|
||||
docs/docs/integrations/providers/trulens.mdx
|
||||
self.assertIn(
|
||||
from trulens_eval import Tru
|
||||
tru = Tru()
|
||||
@@ -24,7 +24,6 @@ jobs:
|
||||
- "3.9"
|
||||
- "3.10"
|
||||
- "3.11"
|
||||
- "3.12"
|
||||
name: "poetry run pytest -m compile tests/integration_tests #${{ matrix.python-version }}"
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
1
.github/workflows/_dependencies.yml
vendored
1
.github/workflows/_dependencies.yml
vendored
@@ -28,7 +28,6 @@ jobs:
|
||||
- "3.9"
|
||||
- "3.10"
|
||||
- "3.11"
|
||||
- "3.12"
|
||||
name: dependency checks ${{ matrix.python-version }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
12
.github/workflows/_integration_test.yml
vendored
12
.github/workflows/_integration_test.yml
vendored
@@ -12,6 +12,7 @@ env:
|
||||
|
||||
jobs:
|
||||
build:
|
||||
environment: Scheduled testing
|
||||
defaults:
|
||||
run:
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
@@ -52,19 +53,11 @@ jobs:
|
||||
shell: bash
|
||||
env:
|
||||
AI21_API_KEY: ${{ secrets.AI21_API_KEY }}
|
||||
FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
|
||||
GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
|
||||
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
AZURE_OPENAI_API_VERSION: ${{ secrets.AZURE_OPENAI_API_VERSION }}
|
||||
AZURE_OPENAI_API_BASE: ${{ secrets.AZURE_OPENAI_API_BASE }}
|
||||
AZURE_OPENAI_API_KEY: ${{ secrets.AZURE_OPENAI_API_KEY }}
|
||||
AZURE_OPENAI_CHAT_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_CHAT_DEPLOYMENT_NAME }}
|
||||
AZURE_OPENAI_LLM_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_LLM_DEPLOYMENT_NAME }}
|
||||
AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME }}
|
||||
MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
|
||||
TOGETHER_API_KEY: ${{ secrets.TOGETHER_API_KEY }}
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
|
||||
NVIDIA_API_KEY: ${{ secrets.NVIDIA_API_KEY }}
|
||||
GOOGLE_SEARCH_API_KEY: ${{ secrets.GOOGLE_SEARCH_API_KEY }}
|
||||
GOOGLE_CSE_ID: ${{ secrets.GOOGLE_CSE_ID }}
|
||||
@@ -82,9 +75,6 @@ jobs:
|
||||
ES_API_KEY: ${{ secrets.ES_API_KEY }}
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # for airbyte
|
||||
MONGODB_ATLAS_URI: ${{ secrets.MONGODB_ATLAS_URI }}
|
||||
VOYAGE_API_KEY: ${{ secrets.VOYAGE_API_KEY }}
|
||||
COHERE_API_KEY: ${{ secrets.COHERE_API_KEY }}
|
||||
UPSTAGE_API_KEY: ${{ secrets.UPSTAGE_API_KEY }}
|
||||
run: |
|
||||
make integration_tests
|
||||
|
||||
|
||||
2
.github/workflows/_lint.yml
vendored
2
.github/workflows/_lint.yml
vendored
@@ -34,7 +34,7 @@ jobs:
|
||||
# so linting on fewer versions makes CI faster.
|
||||
python-version:
|
||||
- "3.8"
|
||||
- "3.12"
|
||||
- "3.11"
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
|
||||
129
.github/workflows/_release.yml
vendored
129
.github/workflows/_release.yml
vendored
@@ -13,11 +13,6 @@ on:
|
||||
required: true
|
||||
type: string
|
||||
default: 'libs/langchain'
|
||||
dangerous-nonmaster-release:
|
||||
required: false
|
||||
type: boolean
|
||||
default: false
|
||||
description: "Release from a non-master branch (danger!)"
|
||||
|
||||
env:
|
||||
PYTHON_VERSION: "3.11"
|
||||
@@ -25,7 +20,7 @@ env:
|
||||
|
||||
jobs:
|
||||
build:
|
||||
if: github.ref == 'refs/heads/master' || inputs.dangerous-nonmaster-release
|
||||
if: github.ref == 'refs/heads/master'
|
||||
environment: Scheduled testing
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
@@ -60,7 +55,7 @@ jobs:
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
|
||||
- name: Upload build
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: dist
|
||||
path: ${{ inputs.working-directory }}/dist/
|
||||
@@ -72,78 +67,19 @@ jobs:
|
||||
run: |
|
||||
echo pkg-name="$(poetry version | cut -d ' ' -f 1)" >> $GITHUB_OUTPUT
|
||||
echo version="$(poetry version --short)" >> $GITHUB_OUTPUT
|
||||
release-notes:
|
||||
needs:
|
||||
- build
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
release-body: ${{ steps.generate-release-body.outputs.release-body }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
repository: langchain-ai/langchain
|
||||
path: langchain
|
||||
sparse-checkout: | # this only grabs files for relevant dir
|
||||
${{ inputs.working-directory }}
|
||||
ref: master # this scopes to just master branch
|
||||
fetch-depth: 0 # this fetches entire commit history
|
||||
- name: Check Tags
|
||||
id: check-tags
|
||||
shell: bash
|
||||
working-directory: langchain/${{ inputs.working-directory }}
|
||||
env:
|
||||
PKG_NAME: ${{ needs.build.outputs.pkg-name }}
|
||||
VERSION: ${{ needs.build.outputs.version }}
|
||||
run: |
|
||||
REGEX="^$PKG_NAME==\\d+\\.\\d+\\.\\d+\$"
|
||||
echo $REGEX
|
||||
PREV_TAG=$(git tag --sort=-creatordate | grep -P $REGEX || true | head -1)
|
||||
TAG="${PKG_NAME}==${VERSION}"
|
||||
if [ "$TAG" == "$PREV_TAG" ]; then
|
||||
echo "No new version to release"
|
||||
exit 1
|
||||
fi
|
||||
echo tag="$TAG" >> $GITHUB_OUTPUT
|
||||
echo prev-tag="$PREV_TAG" >> $GITHUB_OUTPUT
|
||||
- name: Generate release body
|
||||
id: generate-release-body
|
||||
working-directory: langchain
|
||||
env:
|
||||
WORKING_DIR: ${{ inputs.working-directory }}
|
||||
PKG_NAME: ${{ needs.build.outputs.pkg-name }}
|
||||
TAG: ${{ steps.check-tags.outputs.tag }}
|
||||
PREV_TAG: ${{ steps.check-tags.outputs.prev-tag }}
|
||||
run: |
|
||||
PREAMBLE="Changes since $PREV_TAG"
|
||||
# if PREV_TAG is empty, then we are releasing the first version
|
||||
if [ -z "$PREV_TAG" ]; then
|
||||
PREAMBLE="Initial release"
|
||||
PREV_TAG=$(git rev-list --max-parents=0 HEAD)
|
||||
fi
|
||||
{
|
||||
echo 'release-body<<EOF'
|
||||
echo "# Release $TAG"
|
||||
echo $PREAMBLE
|
||||
echo
|
||||
git log --format="%s" "$PREV_TAG"..HEAD -- $WORKING_DIR
|
||||
echo EOF
|
||||
} >> "$GITHUB_OUTPUT"
|
||||
|
||||
test-pypi-publish:
|
||||
needs:
|
||||
- build
|
||||
- release-notes
|
||||
uses:
|
||||
./.github/workflows/_test_release.yml
|
||||
with:
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
dangerous-nonmaster-release: ${{ inputs.dangerous-nonmaster-release }}
|
||||
secrets: inherit
|
||||
|
||||
pre-release-checks:
|
||||
needs:
|
||||
- build
|
||||
- release-notes
|
||||
- test-pypi-publish
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
@@ -176,7 +112,7 @@ jobs:
|
||||
PKG_NAME: ${{ needs.build.outputs.pkg-name }}
|
||||
VERSION: ${{ needs.build.outputs.version }}
|
||||
# Here we use:
|
||||
# - The default regular PyPI index as the *primary* index, meaning
|
||||
# - The default regular PyPI index as the *primary* index, meaning
|
||||
# that it takes priority (https://pypi.org/simple)
|
||||
# - The test PyPI index as an extra index, so that any dependencies that
|
||||
# are not found on test PyPI can be resolved and installed anyway.
|
||||
@@ -221,24 +157,6 @@ jobs:
|
||||
run: make tests
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
|
||||
- name: Get minimum versions
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
id: min-version
|
||||
run: |
|
||||
poetry run pip install packaging
|
||||
min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml)"
|
||||
echo "min-versions=$min_versions" >> "$GITHUB_OUTPUT"
|
||||
echo "min-versions=$min_versions"
|
||||
|
||||
- name: Run unit tests with minimum dependency versions
|
||||
if: ${{ steps.min-version.outputs.min-versions != '' }}
|
||||
env:
|
||||
MIN_VERSIONS: ${{ steps.min-version.outputs.min-versions }}
|
||||
run: |
|
||||
poetry run pip install --force-reinstall $MIN_VERSIONS --editable .
|
||||
make tests
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
|
||||
- name: 'Authenticate to Google Cloud'
|
||||
id: 'auth'
|
||||
uses: google-github-actions/auth@v2
|
||||
@@ -278,16 +196,30 @@ jobs:
|
||||
ES_API_KEY: ${{ secrets.ES_API_KEY }}
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # for airbyte
|
||||
MONGODB_ATLAS_URI: ${{ secrets.MONGODB_ATLAS_URI }}
|
||||
VOYAGE_API_KEY: ${{ secrets.VOYAGE_API_KEY }}
|
||||
UPSTAGE_API_KEY: ${{ secrets.UPSTAGE_API_KEY }}
|
||||
FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
|
||||
run: make integration_tests
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
|
||||
- name: Get minimum versions
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
id: min-version
|
||||
run: |
|
||||
poetry run pip install packaging
|
||||
min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml)"
|
||||
echo "min-versions=$min_versions" >> "$GITHUB_OUTPUT"
|
||||
echo "min-versions=$min_versions"
|
||||
|
||||
- name: Run unit tests with minimum dependency versions
|
||||
if: ${{ steps.min-version.outputs.min-versions != '' }}
|
||||
env:
|
||||
MIN_VERSIONS: ${{ steps.min-version.outputs.min-versions }}
|
||||
run: |
|
||||
poetry run pip install $MIN_VERSIONS
|
||||
make tests
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
|
||||
publish:
|
||||
needs:
|
||||
- build
|
||||
- release-notes
|
||||
- test-pypi-publish
|
||||
- pre-release-checks
|
||||
runs-on: ubuntu-latest
|
||||
@@ -314,7 +246,7 @@ jobs:
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
cache-key: release
|
||||
|
||||
- uses: actions/download-artifact@v4
|
||||
- uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: dist
|
||||
path: ${{ inputs.working-directory }}/dist/
|
||||
@@ -329,7 +261,6 @@ jobs:
|
||||
mark-release:
|
||||
needs:
|
||||
- build
|
||||
- release-notes
|
||||
- test-pypi-publish
|
||||
- pre-release-checks
|
||||
- publish
|
||||
@@ -354,18 +285,18 @@ jobs:
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
cache-key: release
|
||||
|
||||
- uses: actions/download-artifact@v4
|
||||
- uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: dist
|
||||
path: ${{ inputs.working-directory }}/dist/
|
||||
|
||||
- name: Create Tag
|
||||
|
||||
- name: Create Release
|
||||
uses: ncipollo/release-action@v1
|
||||
if: ${{ inputs.working-directory == 'libs/langchain' }}
|
||||
with:
|
||||
artifacts: "dist/*"
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
generateReleaseNotes: false
|
||||
tag: ${{needs.build.outputs.pkg-name}}==${{ needs.build.outputs.version }}
|
||||
body: ${{ needs.release-notes.outputs.release-body }}
|
||||
commit: ${{ github.sha }}
|
||||
makeLatest: ${{ needs.build.outputs.pkg-name == 'langchain-core'}}
|
||||
draft: false
|
||||
generateReleaseNotes: true
|
||||
tag: v${{ needs.build.outputs.version }}
|
||||
commit: master
|
||||
|
||||
1
.github/workflows/_test.yml
vendored
1
.github/workflows/_test.yml
vendored
@@ -28,7 +28,6 @@ jobs:
|
||||
- "3.9"
|
||||
- "3.10"
|
||||
- "3.11"
|
||||
- "3.12"
|
||||
name: "make test #${{ matrix.python-version }}"
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
50
.github/workflows/_test_doc_imports.yml
vendored
50
.github/workflows/_test_doc_imports.yml
vendored
@@ -1,50 +0,0 @@
|
||||
name: test_doc_imports
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
|
||||
env:
|
||||
POETRY_VERSION: "1.7.1"
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version:
|
||||
- "3.12"
|
||||
name: "check doc imports #${{ matrix.python-version }}"
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
|
||||
uses: "./.github/actions/poetry_setup"
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
poetry-version: ${{ env.POETRY_VERSION }}
|
||||
cache-key: core
|
||||
|
||||
- name: Install dependencies
|
||||
shell: bash
|
||||
run: poetry install --with test
|
||||
|
||||
- name: Install langchain editable
|
||||
run: |
|
||||
poetry run pip install -e libs/core libs/langchain libs/community libs/experimental
|
||||
|
||||
- name: Check doc imports
|
||||
shell: bash
|
||||
run: |
|
||||
poetry run python docs/scripts/check_imports.py
|
||||
|
||||
- name: Ensure the test did not create any additional files
|
||||
shell: bash
|
||||
run: |
|
||||
set -eu
|
||||
|
||||
STATUS="$(git status)"
|
||||
echo "$STATUS"
|
||||
|
||||
# grep will exit non-zero if the target message isn't found,
|
||||
# and `set -e` above will cause the step to fail.
|
||||
echo "$STATUS" | grep 'nothing to commit, working tree clean'
|
||||
11
.github/workflows/_test_release.yml
vendored
11
.github/workflows/_test_release.yml
vendored
@@ -7,11 +7,6 @@ on:
|
||||
required: true
|
||||
type: string
|
||||
description: "From which folder this pipeline executes"
|
||||
dangerous-nonmaster-release:
|
||||
required: false
|
||||
type: boolean
|
||||
default: false
|
||||
description: "Release from a non-master branch (danger!)"
|
||||
|
||||
env:
|
||||
POETRY_VERSION: "1.7.1"
|
||||
@@ -19,7 +14,7 @@ env:
|
||||
|
||||
jobs:
|
||||
build:
|
||||
if: github.ref == 'refs/heads/master' || inputs.dangerous-nonmaster-release
|
||||
if: github.ref == 'refs/heads/master'
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
outputs:
|
||||
@@ -53,7 +48,7 @@ jobs:
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
|
||||
- name: Upload build
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: test-dist
|
||||
path: ${{ inputs.working-directory }}/dist/
|
||||
@@ -81,7 +76,7 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: actions/download-artifact@v4
|
||||
- uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: test-dist
|
||||
path: ${{ inputs.working-directory }}/dist/
|
||||
|
||||
86
.github/workflows/api_doc_build.yml
vendored
Normal file
86
.github/workflows/api_doc_build.yml
vendored
Normal file
@@ -0,0 +1,86 @@
|
||||
name: API docs build
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
schedule:
|
||||
- cron: '0 13 * * *'
|
||||
env:
|
||||
POETRY_VERSION: "1.7.1"
|
||||
PYTHON_VERSION: "3.10"
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: bagatur/api_docs_build
|
||||
path: langchain
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
repository: langchain-ai/langchain-google
|
||||
path: langchain-google
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
repository: langchain-ai/langchain-datastax
|
||||
path: langchain-datastax
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
repository: langchain-ai/langchain-nvidia
|
||||
path: langchain-nvidia
|
||||
|
||||
- name: Set Git config
|
||||
working-directory: langchain
|
||||
run: |
|
||||
git config --local user.email "actions@github.com"
|
||||
git config --local user.name "Github Actions"
|
||||
|
||||
- name: Merge master
|
||||
working-directory: langchain
|
||||
run: |
|
||||
git fetch origin master
|
||||
git merge origin/master -m "Merge master" --allow-unrelated-histories -X theirs
|
||||
|
||||
- name: Move google libs
|
||||
run: |
|
||||
rm -rf \
|
||||
langchain/libs/partners/google-genai \
|
||||
langchain/libs/partners/google-vertexai \
|
||||
langchain/libs/partners/astradb \
|
||||
langchain/libs/partners/nvidia-trt \
|
||||
langchain/libs/partners/nvidia-ai-endpoints
|
||||
mv langchain-google/libs/genai langchain/libs/partners/google-genai
|
||||
mv langchain-google/libs/vertexai langchain/libs/partners/google-vertexai
|
||||
mv langchain-datastax/libs/astradb langchain/libs/partners/astradb
|
||||
mv langchain-nvidia/libs/ai-endpoints langchain/libs/partners/nvidia-ai-endpoints
|
||||
mv langchain-nvidia/libs/trt langchain/libs/partners/nvidia-trt
|
||||
|
||||
- name: Set up Python ${{ env.PYTHON_VERSION }} + Poetry ${{ env.POETRY_VERSION }}
|
||||
uses: "./langchain/.github/actions/poetry_setup"
|
||||
with:
|
||||
python-version: ${{ env.PYTHON_VERSION }}
|
||||
poetry-version: ${{ env.POETRY_VERSION }}
|
||||
cache-key: api-docs
|
||||
working-directory: langchain
|
||||
|
||||
- name: Install dependencies
|
||||
working-directory: langchain
|
||||
run: |
|
||||
poetry run python -m pip install --upgrade --no-cache-dir pip setuptools
|
||||
poetry run python -m pip install --upgrade --no-cache-dir sphinx readthedocs-sphinx-ext
|
||||
# skip airbyte and ibm due to pandas dependency issue
|
||||
poetry run python -m pip install $(ls ./libs/partners | grep -vE "airbyte|ibm" | xargs -I {} echo "./libs/partners/{}")
|
||||
poetry run python -m pip install --exists-action=w --no-cache-dir -r docs/api_reference/requirements.txt
|
||||
|
||||
- name: Build docs
|
||||
working-directory: langchain
|
||||
run: |
|
||||
poetry run python -m pip install --upgrade --no-cache-dir pip setuptools
|
||||
poetry run python docs/api_reference/create_api_rst.py
|
||||
poetry run python -m sphinx -T -E -b html -d _build/doctrees -c docs/api_reference docs/api_reference api_reference_build/html -j auto
|
||||
|
||||
# https://github.com/marketplace/actions/add-commit
|
||||
- uses: EndBug/add-and-commit@v9
|
||||
with:
|
||||
cwd: langchain
|
||||
message: 'Update API docs build'
|
||||
25
.github/workflows/check-broken-links.yml
vendored
25
.github/workflows/check-broken-links.yml
vendored
@@ -1,25 +0,0 @@
|
||||
name: Check Broken Links
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
schedule:
|
||||
- cron: '0 13 * * *'
|
||||
|
||||
jobs:
|
||||
check-links:
|
||||
if: github.repository_owner == 'langchain-ai'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Use Node.js 18.x
|
||||
uses: actions/setup-node@v3
|
||||
with:
|
||||
node-version: 18.x
|
||||
cache: "yarn"
|
||||
cache-dependency-path: ./docs/yarn.lock
|
||||
- name: Install dependencies
|
||||
run: yarn install --immutable --mode=skip-build
|
||||
working-directory: ./docs
|
||||
- name: Check broken links
|
||||
run: yarn check-broken-links
|
||||
working-directory: ./docs
|
||||
16
.github/workflows/check_diffs.yml
vendored
16
.github/workflows/check_diffs.yml
vendored
@@ -26,7 +26,7 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.11'
|
||||
python-version: '3.10'
|
||||
- id: files
|
||||
uses: Ana06/get-changed-files@v2.2.0
|
||||
- id: set-matrix
|
||||
@@ -36,7 +36,6 @@ jobs:
|
||||
dirs-to-lint: ${{ steps.set-matrix.outputs.dirs-to-lint }}
|
||||
dirs-to-test: ${{ steps.set-matrix.outputs.dirs-to-test }}
|
||||
dirs-to-extended-test: ${{ steps.set-matrix.outputs.dirs-to-extended-test }}
|
||||
docs-edited: ${{ steps.set-matrix.outputs.docs-edited }}
|
||||
lint:
|
||||
name: cd ${{ matrix.working-directory }}
|
||||
needs: [ build ]
|
||||
@@ -61,12 +60,6 @@ jobs:
|
||||
working-directory: ${{ matrix.working-directory }}
|
||||
secrets: inherit
|
||||
|
||||
test-doc-imports:
|
||||
needs: [ build ]
|
||||
if: ${{ needs.build.outputs.dirs-to-test != '[]' || needs.build.outputs.docs-edited }}
|
||||
uses: ./.github/workflows/_test_doc_imports.yml
|
||||
secrets: inherit
|
||||
|
||||
compile-integration-tests:
|
||||
name: cd ${{ matrix.working-directory }}
|
||||
needs: [ build ]
|
||||
@@ -104,7 +97,6 @@ jobs:
|
||||
- "3.9"
|
||||
- "3.10"
|
||||
- "3.11"
|
||||
- "3.12"
|
||||
runs-on: ubuntu-latest
|
||||
defaults:
|
||||
run:
|
||||
@@ -124,9 +116,7 @@ jobs:
|
||||
shell: bash
|
||||
run: |
|
||||
echo "Running extended tests, installing dependencies with poetry..."
|
||||
poetry install --with test
|
||||
poetry run pip install uv
|
||||
poetry run uv pip install -r extended_testing_deps.txt
|
||||
poetry install -E extended_testing --with test
|
||||
|
||||
- name: Run extended tests
|
||||
run: make extended_tests
|
||||
@@ -144,7 +134,7 @@ jobs:
|
||||
echo "$STATUS" | grep 'nothing to commit, working tree clean'
|
||||
ci_success:
|
||||
name: "CI Success"
|
||||
needs: [build, lint, test, compile-integration-tests, dependencies, extended-tests, test-doc-imports]
|
||||
needs: [build, lint, test, compile-integration-tests, dependencies, extended-tests]
|
||||
if: |
|
||||
always()
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
31
.github/workflows/check_new_docs.yml
vendored
31
.github/workflows/check_new_docs.yml
vendored
@@ -1,31 +0,0 @@
|
||||
---
|
||||
name: Integration docs lint
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [master]
|
||||
pull_request:
|
||||
|
||||
# If another push to the same PR or branch happens while this workflow is still running,
|
||||
# cancel the earlier run in favor of the next run.
|
||||
#
|
||||
# There's no point in testing an outdated version of the code. GitHub only allows
|
||||
# a limited number of job runners to be active at the same time, so it's better to cancel
|
||||
# pointless jobs early so that more useful jobs can run sooner.
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.10'
|
||||
- id: files
|
||||
uses: Ana06/get-changed-files@v2.2.0
|
||||
- name: Check new docs
|
||||
run: |
|
||||
python docs/scripts/check_templates.py ${{ steps.files.outputs.added }}
|
||||
16
.github/workflows/codespell.yml
vendored
16
.github/workflows/codespell.yml
vendored
@@ -3,9 +3,9 @@ name: CI / cd . / make spell_check
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [master, v0.1]
|
||||
branches: [master]
|
||||
pull_request:
|
||||
branches: [master, v0.1]
|
||||
branches: [master]
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
@@ -29,9 +29,9 @@ jobs:
|
||||
python .github/workflows/extract_ignored_words_list.py
|
||||
id: extract_ignore_words
|
||||
|
||||
# - name: Codespell
|
||||
# uses: codespell-project/actions-codespell@v2
|
||||
# with:
|
||||
# skip: guide_imports.json,*.ambr,./cookbook/data/imdb_top_1000.csv,*.lock
|
||||
# ignore_words_list: ${{ steps.extract_ignore_words.outputs.ignore_words_list }}
|
||||
# exclude_file: ./.github/workflows/codespell-exclude
|
||||
- name: Codespell
|
||||
uses: codespell-project/actions-codespell@v2
|
||||
with:
|
||||
skip: guide_imports.json,*.ambr,./cookbook/data/imdb_top_1000.csv,*.lock
|
||||
ignore_words_list: ${{ steps.extract_ignore_words.outputs.ignore_words_list }}
|
||||
exclude_file: libs/community/langchain_community/llms/yuan2.py
|
||||
|
||||
@@ -7,4 +7,4 @@ ignore_words_list = (
|
||||
pyproject_toml.get("tool", {}).get("codespell", {}).get("ignore-words-list")
|
||||
)
|
||||
|
||||
print(f"::set-output name=ignore_words_list::{ignore_words_list}")
|
||||
print(f"::set-output name=ignore_words_list::{ignore_words_list}") # noqa: T201
|
||||
|
||||
87
.github/workflows/scheduled_test.yml
vendored
87
.github/workflows/scheduled_test.yml
vendored
@@ -10,62 +10,28 @@ env:
|
||||
|
||||
jobs:
|
||||
build:
|
||||
if: github.repository_owner == 'langchain-ai'
|
||||
name: Python ${{ matrix.python-version }} - ${{ matrix.working-directory }}
|
||||
defaults:
|
||||
run:
|
||||
working-directory: libs/langchain
|
||||
runs-on: ubuntu-latest
|
||||
environment: Scheduled testing
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
python-version:
|
||||
- "3.8"
|
||||
- "3.9"
|
||||
- "3.10"
|
||||
- "3.11"
|
||||
working-directory:
|
||||
- "libs/partners/openai"
|
||||
- "libs/partners/anthropic"
|
||||
- "libs/partners/ai21"
|
||||
- "libs/partners/fireworks"
|
||||
- "libs/partners/groq"
|
||||
- "libs/partners/mistralai"
|
||||
- "libs/partners/together"
|
||||
- "libs/partners/cohere"
|
||||
- "libs/partners/google-vertexai"
|
||||
- "libs/partners/google-genai"
|
||||
- "libs/partners/aws"
|
||||
|
||||
name: Python ${{ matrix.python-version }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
path: langchain
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
repository: langchain-ai/langchain-google
|
||||
path: langchain-google
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
repository: langchain-ai/langchain-cohere
|
||||
path: langchain-cohere
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
repository: langchain-ai/langchain-aws
|
||||
path: langchain-aws
|
||||
|
||||
- name: Move libs
|
||||
run: |
|
||||
rm -rf \
|
||||
langchain/libs/partners/google-genai \
|
||||
langchain/libs/partners/google-vertexai \
|
||||
langchain/libs/partners/cohere
|
||||
mv langchain-google/libs/genai langchain/libs/partners/google-genai
|
||||
mv langchain-google/libs/vertexai langchain/libs/partners/google-vertexai
|
||||
mv langchain-cohere/libs/cohere langchain/libs/partners/cohere
|
||||
mv langchain-aws/libs/aws langchain/libs/partners/aws
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: "./langchain/.github/actions/poetry_setup"
|
||||
uses: "./.github/actions/poetry_setup"
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
poetry-version: ${{ env.POETRY_VERSION }}
|
||||
working-directory: langchain/${{ matrix.working-directory }}
|
||||
working-directory: libs/langchain
|
||||
cache-key: scheduled
|
||||
|
||||
- name: 'Authenticate to Google Cloud'
|
||||
@@ -79,15 +45,22 @@ jobs:
|
||||
with:
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ secrets.AWS_REGION }}
|
||||
aws-region: ${{ vars.AWS_REGION }}
|
||||
|
||||
- name: Install dependencies
|
||||
working-directory: libs/langchain
|
||||
shell: bash
|
||||
run: |
|
||||
echo "Running scheduled tests, installing dependencies with poetry..."
|
||||
cd langchain/${{ matrix.working-directory }}
|
||||
poetry install --with=test_integration,test
|
||||
|
||||
- name: Run integration tests
|
||||
- name: Install deps outside pyproject
|
||||
if: ${{ startsWith(inputs.working-directory, 'libs/community/') }}
|
||||
shell: bash
|
||||
run: poetry run pip install "boto3<2" "google-cloud-aiplatform<2"
|
||||
|
||||
- name: Run tests
|
||||
shell: bash
|
||||
env:
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
@@ -97,30 +70,12 @@ jobs:
|
||||
AZURE_OPENAI_CHAT_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_CHAT_DEPLOYMENT_NAME }}
|
||||
AZURE_OPENAI_LLM_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_LLM_DEPLOYMENT_NAME }}
|
||||
AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME }}
|
||||
AI21_API_KEY: ${{ secrets.AI21_API_KEY }}
|
||||
FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
|
||||
GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
|
||||
MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
|
||||
TOGETHER_API_KEY: ${{ secrets.TOGETHER_API_KEY }}
|
||||
COHERE_API_KEY: ${{ secrets.COHERE_API_KEY }}
|
||||
NVIDIA_API_KEY: ${{ secrets.NVIDIA_API_KEY }}
|
||||
GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
|
||||
GOOGLE_SEARCH_API_KEY: ${{ secrets.GOOGLE_SEARCH_API_KEY }}
|
||||
GOOGLE_CSE_ID: ${{ secrets.GOOGLE_CSE_ID }}
|
||||
run: |
|
||||
cd langchain/${{ matrix.working-directory }}
|
||||
make integration_tests
|
||||
|
||||
- name: Remove external libraries
|
||||
run: |
|
||||
rm -rf \
|
||||
langchain/libs/partners/google-genai \
|
||||
langchain/libs/partners/google-vertexai \
|
||||
langchain/libs/partners/cohere \
|
||||
langchain/libs/partners/aws
|
||||
make scheduled_tests
|
||||
|
||||
- name: Ensure the tests did not create any additional files
|
||||
working-directory: langchain
|
||||
shell: bash
|
||||
run: |
|
||||
set -eu
|
||||
|
||||
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -116,7 +116,6 @@ celerybeat.pid
|
||||
.env
|
||||
.envrc
|
||||
.venv*
|
||||
venv*
|
||||
env/
|
||||
ENV/
|
||||
env.bak/
|
||||
@@ -133,7 +132,6 @@ env.bak/
|
||||
|
||||
# mypy
|
||||
.mypy_cache/
|
||||
.mypy_cache_test/
|
||||
.dmypy.json
|
||||
dmypy.json
|
||||
|
||||
@@ -179,4 +177,3 @@ _dist
|
||||
docs/docs/templates
|
||||
|
||||
prof
|
||||
virtualenv/
|
||||
|
||||
71
Makefile
71
Makefile
@@ -1,60 +1,44 @@
|
||||
.PHONY: all clean help docs_build docs_clean docs_linkcheck api_docs_build api_docs_clean api_docs_linkcheck spell_check spell_fix lint lint_package lint_tests format format_diff
|
||||
.PHONY: all clean docs_build docs_clean docs_linkcheck api_docs_build api_docs_clean api_docs_linkcheck
|
||||
|
||||
## help: Show this help info.
|
||||
help: Makefile
|
||||
@printf "\n\033[1mUsage: make <TARGETS> ...\033[0m\n\n\033[1mTargets:\033[0m\n\n"
|
||||
@sed -n 's/^## //p' $< | awk -F':' '{printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' | sort | sed -e 's/^/ /'
|
||||
|
||||
## all: Default target, shows help.
|
||||
# Default target executed when no arguments are given to make.
|
||||
all: help
|
||||
|
||||
## clean: Clean documentation and API documentation artifacts.
|
||||
clean: docs_clean api_docs_clean
|
||||
|
||||
######################
|
||||
# DOCUMENTATION
|
||||
######################
|
||||
|
||||
## docs_build: Build the documentation.
|
||||
clean: docs_clean api_docs_clean
|
||||
|
||||
|
||||
docs_build:
|
||||
cd docs && make build
|
||||
docs/.local_build.sh
|
||||
|
||||
## docs_clean: Clean the documentation build artifacts.
|
||||
docs_clean:
|
||||
cd docs && make clean
|
||||
@if [ -d _dist ]; then \
|
||||
rm -r _dist; \
|
||||
echo "Directory _dist has been cleaned."; \
|
||||
else \
|
||||
echo "Nothing to clean."; \
|
||||
fi
|
||||
|
||||
## docs_linkcheck: Run linkchecker on the documentation.
|
||||
docs_linkcheck:
|
||||
poetry run linkchecker _dist/docs/ --ignore-url node_modules
|
||||
|
||||
## api_docs_build: Build the API Reference documentation.
|
||||
api_docs_build:
|
||||
poetry run python docs/api_reference/create_api_rst.py
|
||||
cd docs/api_reference && poetry run make html
|
||||
|
||||
API_PKG ?= text-splitters
|
||||
|
||||
api_docs_quick_preview:
|
||||
poetry run pip install "pydantic<2"
|
||||
poetry run python docs/api_reference/create_api_rst.py $(API_PKG)
|
||||
cd docs/api_reference && poetry run make html
|
||||
open docs/api_reference/_build/html/$(shell echo $(API_PKG) | sed 's/-/_/g')_api_reference.html
|
||||
|
||||
## api_docs_clean: Clean the API Reference documentation build artifacts.
|
||||
api_docs_clean:
|
||||
find ./docs/api_reference -name '*_api_reference.rst' -delete
|
||||
git clean -fdX ./docs/api_reference
|
||||
|
||||
rm -f docs/api_reference/api_reference.rst
|
||||
cd docs/api_reference && poetry run make clean
|
||||
|
||||
## api_docs_linkcheck: Run linkchecker on the API Reference documentation.
|
||||
api_docs_linkcheck:
|
||||
poetry run linkchecker docs/api_reference/_build/html/index.html
|
||||
|
||||
## spell_check: Run codespell on the project.
|
||||
spell_check:
|
||||
poetry run codespell --toml pyproject.toml
|
||||
|
||||
## spell_fix: Run codespell on the project and fix the errors.
|
||||
spell_fix:
|
||||
poetry run codespell --toml pyproject.toml -w
|
||||
|
||||
@@ -62,14 +46,31 @@ spell_fix:
|
||||
# LINTING AND FORMATTING
|
||||
######################
|
||||
|
||||
## lint: Run linting on the project.
|
||||
lint lint_package lint_tests:
|
||||
poetry run ruff check docs templates cookbook
|
||||
poetry run ruff docs templates cookbook
|
||||
poetry run ruff format docs templates cookbook --diff
|
||||
poetry run ruff check --select I docs templates cookbook
|
||||
poetry run ruff --select I docs templates cookbook
|
||||
git grep 'from langchain import' docs/docs templates cookbook | grep -vE 'from langchain import (hub)' && exit 1 || exit 0
|
||||
|
||||
## format: Format the project files.
|
||||
format format_diff:
|
||||
poetry run ruff format docs templates cookbook
|
||||
poetry run ruff check --select I --fix docs templates cookbook
|
||||
poetry run ruff --select I --fix docs templates cookbook
|
||||
|
||||
|
||||
######################
|
||||
# HELP
|
||||
######################
|
||||
|
||||
help:
|
||||
@echo '===================='
|
||||
@echo '-- DOCUMENTATION --'
|
||||
@echo 'clean - run docs_clean and api_docs_clean'
|
||||
@echo 'docs_build - build the documentation'
|
||||
@echo 'docs_clean - clean the documentation build artifacts'
|
||||
@echo 'docs_linkcheck - run linkchecker on the documentation'
|
||||
@echo 'api_docs_build - build the API Reference documentation'
|
||||
@echo 'api_docs_clean - clean the API Reference documentation build artifacts'
|
||||
@echo 'api_docs_linkcheck - run linkchecker on the API Reference documentation'
|
||||
@echo 'spell_check - run codespell on the project'
|
||||
@echo 'spell_fix - run codespell on the project and fix the errors'
|
||||
@echo '-- TEST and LINT tasks are within libs/*/ per-package --'
|
||||
|
||||
110
README.md
110
README.md
@@ -2,17 +2,17 @@
|
||||
|
||||
⚡ Build context-aware reasoning applications ⚡
|
||||
|
||||
[](https://github.com/langchain-ai/langchain/releases)
|
||||
[](https://github.com/langchain-ai/langchain/releases)
|
||||
[](https://github.com/langchain-ai/langchain/actions/workflows/check_diffs.yml)
|
||||
[](https://opensource.org/licenses/MIT)
|
||||
[](https://pypistats.org/packages/langchain-core)
|
||||
[](https://star-history.com/#langchain-ai/langchain)
|
||||
[](https://libraries.io/github/langchain-ai/langchain)
|
||||
[](https://github.com/langchain-ai/langchain/issues)
|
||||
[](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchain)
|
||||
[](https://codespaces.new/langchain-ai/langchain)
|
||||
[](https://discord.gg/6adMQxSpJS)
|
||||
[](https://pepy.tech/project/langchain)
|
||||
[](https://opensource.org/licenses/MIT)
|
||||
[](https://twitter.com/langchainai)
|
||||
[](https://discord.gg/6adMQxSpJS)
|
||||
[](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchain)
|
||||
[](https://codespaces.new/langchain-ai/langchain)
|
||||
[](https://star-history.com/#langchain-ai/langchain)
|
||||
[](https://libraries.io/github/langchain-ai/langchain)
|
||||
[](https://github.com/langchain-ai/langchain/issues)
|
||||
|
||||
Looking for the JS/TS library? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).
|
||||
|
||||
@@ -34,103 +34,79 @@ conda install langchain -c conda-forge
|
||||
|
||||
## 🤔 What is LangChain?
|
||||
|
||||
**LangChain** is a framework for developing applications powered by large language models (LLMs).
|
||||
**LangChain** is a framework for developing applications powered by language models. It enables applications that:
|
||||
- **Are context-aware**: connect a language model to sources of context (prompt instructions, few shot examples, content to ground its response in, etc.)
|
||||
- **Reason**: rely on a language model to reason (about how to answer based on provided context, what actions to take, etc.)
|
||||
|
||||
For these applications, LangChain simplifies the entire application lifecycle:
|
||||
This framework consists of several parts.
|
||||
- **LangChain Libraries**: The Python and JavaScript libraries. Contains interfaces and integrations for a myriad of components, a basic run time for combining these components into chains and agents, and off-the-shelf implementations of chains and agents.
|
||||
- **[LangChain Templates](templates)**: A collection of easily deployable reference architectures for a wide variety of tasks.
|
||||
- **[LangServe](https://github.com/langchain-ai/langserve)**: A library for deploying LangChain chains as a REST API.
|
||||
- **[LangSmith](https://smith.langchain.com)**: A developer platform that lets you debug, test, evaluate, and monitor chains built on any LLM framework and seamlessly integrates with LangChain.
|
||||
- **[LangGraph](https://python.langchain.com/docs/langgraph)**: LangGraph is a library for building stateful, multi-actor applications with LLMs, built on top of (and intended to be used with) LangChain. It extends the LangChain Expression Language with the ability to coordinate multiple chains (or actors) across multiple steps of computation in a cyclic manner.
|
||||
|
||||
- **Open-source libraries**: Build your applications using LangChain's [modular building blocks](https://python.langchain.com/v0.2/docs/concepts/#langchain-expression-language-lcel) and [components](https://python.langchain.com/v0.2/docs/concepts/#components). Integrate with hundreds of [third-party providers](https://python.langchain.com/v0.2/docs/integrations/platforms/).
|
||||
- **Productionization**: Inspect, monitor, and evaluate your apps with [LangSmith](https://docs.smith.langchain.com/) so that you can constantly optimize and deploy with confidence.
|
||||
- **Deployment**: Turn any chain into a REST API with [LangServe](https://python.langchain.com/v0.2/docs/langserve/).
|
||||
The LangChain libraries themselves are made up of several different packages.
|
||||
- **[`langchain-core`](libs/core)**: Base abstractions and LangChain Expression Language.
|
||||
- **[`langchain-community`](libs/community)**: Third party integrations.
|
||||
- **[`langchain`](libs/langchain)**: Chains, agents, and retrieval strategies that make up an application's cognitive architecture.
|
||||
|
||||
### Open-source libraries
|
||||
- **`langchain-core`**: Base abstractions and LangChain Expression Language.
|
||||
- **`langchain-community`**: Third party integrations.
|
||||
- Some integrations have been further split into **partner packages** that only rely on **`langchain-core`**. Examples include **`langchain_openai`** and **`langchain_anthropic`**.
|
||||
- **`langchain`**: Chains, agents, and retrieval strategies that make up an application's cognitive architecture.
|
||||
- **[`LangGraph`](https://langchain-ai.github.io/langgraph/)**: A library for building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
|
||||
|
||||
### Productionization:
|
||||
- **[LangSmith](https://docs.smith.langchain.com/)**: A developer platform that lets you debug, test, evaluate, and monitor chains built on any LLM framework and seamlessly integrates with LangChain.
|
||||
|
||||
### Deployment:
|
||||
- **[LangServe](https://python.langchain.com/v0.2/docs/langserve/)**: A library for deploying LangChain chains as REST APIs.
|
||||
|
||||

|
||||

|
||||
|
||||
## 🧱 What can you build with LangChain?
|
||||
**❓ Retrieval augmented generation**
|
||||
|
||||
**❓ Question answering with RAG**
|
||||
|
||||
- [Documentation](https://python.langchain.com/v0.2/docs/tutorials/rag/)
|
||||
- [Documentation](https://python.langchain.com/docs/use_cases/question_answering/)
|
||||
- End-to-end Example: [Chat LangChain](https://chat.langchain.com) and [repo](https://github.com/langchain-ai/chat-langchain)
|
||||
|
||||
**🧱 Extracting structured output**
|
||||
**💬 Analyzing structured data**
|
||||
|
||||
- [Documentation](https://python.langchain.com/v0.2/docs/tutorials/extraction/)
|
||||
- End-to-end Example: [SQL Llama2 Template](https://github.com/langchain-ai/langchain-extract/)
|
||||
- [Documentation](https://python.langchain.com/docs/use_cases/qa_structured/sql)
|
||||
- End-to-end Example: [SQL Llama2 Template](https://github.com/langchain-ai/langchain/tree/master/templates/sql-llama2)
|
||||
|
||||
**🤖 Chatbots**
|
||||
|
||||
- [Documentation](https://python.langchain.com/v0.2/docs/tutorials/chatbot/)
|
||||
- [Documentation](https://python.langchain.com/docs/use_cases/chatbots)
|
||||
- End-to-end Example: [Web LangChain (web researcher chatbot)](https://weblangchain.vercel.app) and [repo](https://github.com/langchain-ai/weblangchain)
|
||||
|
||||
And much more! Head to the [Tutorials](https://python.langchain.com/v0.2/docs/tutorials/) section of the docs for more.
|
||||
And much more! Head to the [Use cases](https://python.langchain.com/docs/use_cases/) section of the docs for more.
|
||||
|
||||
## 🚀 How does LangChain help?
|
||||
The main value props of the LangChain libraries are:
|
||||
1. **Components**: composable building blocks, tools and integrations for working with language models. Components are modular and easy-to-use, whether you are using the rest of the LangChain framework or not
|
||||
1. **Components**: composable tools and integrations for working with language models. Components are modular and easy-to-use, whether you are using the rest of the LangChain framework or not
|
||||
2. **Off-the-shelf chains**: built-in assemblages of components for accomplishing higher-level tasks
|
||||
|
||||
Off-the-shelf chains make it easy to get started. Components make it easy to customize existing chains and build new ones.
|
||||
|
||||
## LangChain Expression Language (LCEL)
|
||||
|
||||
LCEL is the foundation of many of LangChain's components, and is a declarative way to compose chains. LCEL was designed from day 1 to support putting prototypes in production, with no code changes, from the simplest “prompt + LLM” chain to the most complex chains.
|
||||
|
||||
- **[Overview](https://python.langchain.com/v0.2/docs/concepts/#langchain-expression-language-lcel)**: LCEL and its benefits
|
||||
- **[Interface](https://python.langchain.com/v0.2/docs/concepts/#runnable-interface)**: The standard Runnable interface for LCEL objects
|
||||
- **[Primitives](https://python.langchain.com/v0.2/docs/how_to/#langchain-expression-language-lcel)**: More on the primitives LCEL includes
|
||||
- **[Cheatsheet](https://python.langchain.com/v0.2/docs/how_to/lcel_cheatsheet/)**: Quick overview of the most common usage patterns
|
||||
|
||||
## Components
|
||||
|
||||
Components fall into the following **modules**:
|
||||
|
||||
**📃 Model I/O**
|
||||
**📃 Model I/O:**
|
||||
|
||||
This includes [prompt management](https://python.langchain.com/v0.2/docs/concepts/#prompt-templates), [prompt optimization](https://python.langchain.com/v0.2/docs/concepts/#example-selectors), a generic interface for [chat models](https://python.langchain.com/v0.2/docs/concepts/#chat-models) and [LLMs](https://python.langchain.com/v0.2/docs/concepts/#llms), and common utilities for working with [model outputs](https://python.langchain.com/v0.2/docs/concepts/#output-parsers).
|
||||
This includes prompt management, prompt optimization, a generic interface for all LLMs, and common utilities for working with LLMs.
|
||||
|
||||
**📚 Retrieval**
|
||||
**📚 Retrieval:**
|
||||
|
||||
Retrieval Augmented Generation involves [loading data](https://python.langchain.com/v0.2/docs/concepts/#document-loaders) from a variety of sources, [preparing it](https://python.langchain.com/v0.2/docs/concepts/#text-splitters), then [searching over (a.k.a. retrieving from)](https://python.langchain.com/v0.2/docs/concepts/#retrievers) it for use in the generation step.
|
||||
Data Augmented Generation involves specific types of chains that first interact with an external data source to fetch data for use in the generation step. Examples include summarization of long pieces of text and question/answering over specific data sources.
|
||||
|
||||
**🤖 Agents**
|
||||
**🤖 Agents:**
|
||||
|
||||
Agents allow an LLM autonomy over how a task is accomplished. Agents make decisions about which Actions to take, then take that Action, observe the result, and repeat until the task is complete. LangChain provides a [standard interface for agents](https://python.langchain.com/v0.2/docs/concepts/#agents) along with the [LangGraph](https://github.com/langchain-ai/langgraph) extension for building custom agents.
|
||||
Agents involve an LLM making decisions about which Actions to take, taking that Action, seeing an Observation, and repeating that until done. LangChain provides a standard interface for agents, a selection of agents to choose from, and examples of end-to-end agents.
|
||||
|
||||
## 📖 Documentation
|
||||
|
||||
Please see [here](https://python.langchain.com) for full documentation, which includes:
|
||||
|
||||
- [Introduction](https://python.langchain.com/v0.2/docs/introduction/): Overview of the framework and the structure of the docs.
|
||||
- [Tutorials](https://python.langchain.com/docs/use_cases/): If you're looking to build something specific or are more of a hands-on learner, check out our tutorials. This is the best place to get started.
|
||||
- [How-to guides](https://python.langchain.com/v0.2/docs/how_to/): Answers to “How do I….?” type questions. These guides are goal-oriented and concrete; they're meant to help you complete a specific task.
|
||||
- [Conceptual guide](https://python.langchain.com/v0.2/docs/concepts/): Conceptual explanations of the key parts of the framework.
|
||||
- [API Reference](https://api.python.langchain.com): Thorough documentation of every class and method.
|
||||
|
||||
## 🌐 Ecosystem
|
||||
|
||||
- [🦜🛠️ LangSmith](https://docs.smith.langchain.com/): Tracing and evaluating your language model applications and intelligent agents to help you move from prototype to production.
|
||||
- [🦜🕸️ LangGraph](https://langchain-ai.github.io/langgraph/): Creating stateful, multi-actor applications with LLMs, built on top of (and intended to be used with) LangChain primitives.
|
||||
- [🦜🏓 LangServe](https://python.langchain.com/docs/langserve): Deploying LangChain runnables and chains as REST APIs.
|
||||
- [LangChain Templates](https://python.langchain.com/v0.2/docs/templates/): Example applications hosted with LangServe.
|
||||
- [Getting started](https://python.langchain.com/docs/get_started/introduction): installation, setting up the environment, simple examples
|
||||
- Overview of the [interfaces](https://python.langchain.com/docs/expression_language/), [modules](https://python.langchain.com/docs/modules/), and [integrations](https://python.langchain.com/docs/integrations/providers)
|
||||
- [Use case](https://python.langchain.com/docs/use_cases/qa_structured/sql) walkthroughs and best practice [guides](https://python.langchain.com/docs/guides/adapters/openai)
|
||||
- [LangSmith](https://python.langchain.com/docs/langsmith/), [LangServe](https://python.langchain.com/docs/langserve), and [LangChain Template](https://python.langchain.com/docs/templates/) overviews
|
||||
- [Reference](https://api.python.langchain.com): full API docs
|
||||
|
||||
|
||||
## 💁 Contributing
|
||||
|
||||
As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.
|
||||
|
||||
For detailed information on how to contribute, see [here](https://python.langchain.com/v0.2/docs/contributing/).
|
||||
For detailed information on how to contribute, see [here](https://python.langchain.com/docs/contributing/).
|
||||
|
||||
## 🌟 Contributors
|
||||
|
||||
|
||||
61
SECURITY.md
@@ -1,61 +1,6 @@
|
||||
# Security Policy
|
||||
|
||||
## Reporting OSS Vulnerabilities
|
||||
## Reporting a Vulnerability
|
||||
|
||||
LangChain is partnered with [huntr by Protect AI](https://huntr.com/) to provide
|
||||
a bounty program for our open source projects.
|
||||
|
||||
Please report security vulnerabilities associated with the LangChain
|
||||
open source projects by visiting the following link:
|
||||
|
||||
[https://huntr.com/bounties/disclose/](https://huntr.com/bounties/disclose/?target=https%3A%2F%2Fgithub.com%2Flangchain-ai%2Flangchain&validSearch=true)
|
||||
|
||||
Before reporting a vulnerability, please review:
|
||||
|
||||
1) In-Scope Targets and Out-of-Scope Targets below.
|
||||
2) The [langchain-ai/langchain](https://python.langchain.com/docs/contributing/repo_structure) monorepo structure.
|
||||
3) LangChain [security guidelines](https://python.langchain.com/docs/security) to
|
||||
understand what we consider to be a security vulnerability vs. developer
|
||||
responsibility.
|
||||
|
||||
### In-Scope Targets
|
||||
|
||||
The following packages and repositories are eligible for bug bounties:
|
||||
|
||||
- langchain-core
|
||||
- langchain (see exceptions)
|
||||
- langchain-community (see exceptions)
|
||||
- langgraph
|
||||
- langserve
|
||||
|
||||
### Out of Scope Targets
|
||||
|
||||
All out of scope targets defined by huntr as well as:
|
||||
|
||||
- **langchain-experimental**: This repository is for experimental code and is not
|
||||
eligible for bug bounties; bug reports to it will be marked as interesting or a waste of
|
||||
time and published with no bounty attached.
|
||||
- **tools**: Tools in either langchain or langchain-community are not eligible for bug
|
||||
bounties. This includes the following directories
|
||||
- langchain/tools
|
||||
- langchain-community/tools
|
||||
- Please review our [security guidelines](https://python.langchain.com/docs/security)
|
||||
for more details, but generally tools interact with the real world. Developers are
|
||||
expected to understand the security implications of their code and are responsible
|
||||
for the security of their tools.
|
||||
- Code documented with security notices. This will be decided on a case by
|
||||
case basis, but likely will not be eligible for a bounty as the code is already
|
||||
documented with guidelines for developers that should be followed for making their
|
||||
application secure.
|
||||
- Any LangSmith related repositories or APIs see below.
|
||||
|
||||
## Reporting LangSmith Vulnerabilities
|
||||
|
||||
Please report security vulnerabilities associated with LangSmith by email to `security@langchain.dev`.
|
||||
|
||||
- LangSmith site: https://smith.langchain.com
|
||||
- SDK client: https://github.com/langchain-ai/langsmith-sdk
|
||||
|
||||
### Other Security Concerns
|
||||
|
||||
For any other security concerns, please contact us at `security@langchain.dev`.
|
||||
Please report security vulnerabilities by email to `security@langchain.dev`.
|
||||
This email is an alias to a subset of our maintainers, and will ensure the issue is promptly triaged and acted upon as needed.
|
||||
|
||||
@@ -38,9 +38,9 @@
|
||||
"\n",
|
||||
"To run locally, we use Ollama.ai. \n",
|
||||
"\n",
|
||||
"See [here](/docs/integrations/chat/ollama) for details on installation and setup.\n",
|
||||
"See [here](https://python.langchain.com/docs/integrations/chat/ollama) for details on installation and setup.\n",
|
||||
"\n",
|
||||
"Also, see [here](/docs/guides/development/local_llms) for our full guide on local LLMs.\n",
|
||||
"Also, see [here](https://python.langchain.com/docs/guides/local_llms) for our full guide on local LLMs.\n",
|
||||
" \n",
|
||||
"To use an external API, which is not private, we can use Replicate."
|
||||
]
|
||||
|
||||
@@ -464,8 +464,8 @@
|
||||
" Check if the base64 data is an image by looking at the start of the data\n",
|
||||
" \"\"\"\n",
|
||||
" image_signatures = {\n",
|
||||
" b\"\\xff\\xd8\\xff\": \"jpg\",\n",
|
||||
" b\"\\x89\\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\": \"png\",\n",
|
||||
" b\"\\xFF\\xD8\\xFF\": \"jpg\",\n",
|
||||
" b\"\\x89\\x50\\x4E\\x47\\x0D\\x0A\\x1A\\x0A\": \"png\",\n",
|
||||
" b\"\\x47\\x49\\x46\\x38\": \"gif\",\n",
|
||||
" b\"\\x52\\x49\\x46\\x46\": \"webp\",\n",
|
||||
" }\n",
|
||||
@@ -604,7 +604,7 @@
|
||||
"source": [
|
||||
"# Check retrieval\n",
|
||||
"query = \"Give me company names that are interesting investments based on EV / NTM and NTM rev growth. Consider EV / NTM multiples vs historical?\"\n",
|
||||
"docs = retriever_multi_vector_img.invoke(query, limit=6)\n",
|
||||
"docs = retriever_multi_vector_img.get_relevant_documents(query, limit=6)\n",
|
||||
"\n",
|
||||
"# We get 4 docs\n",
|
||||
"len(docs)"
|
||||
@@ -630,7 +630,7 @@
|
||||
"source": [
|
||||
"# Check retrieval\n",
|
||||
"query = \"What are the EV / NTM and NTM rev growth for MongoDB, Cloudflare, and Datadog?\"\n",
|
||||
"docs = retriever_multi_vector_img.invoke(query, limit=6)\n",
|
||||
"docs = retriever_multi_vector_img.get_relevant_documents(query, limit=6)\n",
|
||||
"\n",
|
||||
"# We get 4 docs\n",
|
||||
"len(docs)"
|
||||
|
||||
@@ -185,7 +185,7 @@
|
||||
" )\n",
|
||||
" # Text summary chain\n",
|
||||
" model = VertexAI(\n",
|
||||
" temperature=0, model_name=\"gemini-pro\", max_tokens=1024\n",
|
||||
" temperature=0, model_name=\"gemini-pro\", max_output_tokens=1024\n",
|
||||
" ).with_fallbacks([empty_response])\n",
|
||||
" summarize_chain = {\"element\": lambda x: x} | prompt | model | StrOutputParser()\n",
|
||||
"\n",
|
||||
@@ -254,9 +254,9 @@
|
||||
"\n",
|
||||
"def image_summarize(img_base64, prompt):\n",
|
||||
" \"\"\"Make image summary\"\"\"\n",
|
||||
" model = ChatVertexAI(model=\"gemini-pro-vision\", max_tokens=1024)\n",
|
||||
" model = ChatVertexAI(model_name=\"gemini-pro-vision\", max_output_tokens=1024)\n",
|
||||
"\n",
|
||||
" msg = model.invoke(\n",
|
||||
" msg = model(\n",
|
||||
" [\n",
|
||||
" HumanMessage(\n",
|
||||
" content=[\n",
|
||||
@@ -462,8 +462,8 @@
|
||||
" Check if the base64 data is an image by looking at the start of the data\n",
|
||||
" \"\"\"\n",
|
||||
" image_signatures = {\n",
|
||||
" b\"\\xff\\xd8\\xff\": \"jpg\",\n",
|
||||
" b\"\\x89\\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\": \"png\",\n",
|
||||
" b\"\\xFF\\xD8\\xFF\": \"jpg\",\n",
|
||||
" b\"\\x89\\x50\\x4E\\x47\\x0D\\x0A\\x1A\\x0A\": \"png\",\n",
|
||||
" b\"\\x47\\x49\\x46\\x38\": \"gif\",\n",
|
||||
" b\"\\x52\\x49\\x46\\x46\": \"webp\",\n",
|
||||
" }\n",
|
||||
@@ -553,7 +553,9 @@
|
||||
" \"\"\"\n",
|
||||
"\n",
|
||||
" # Multi-modal LLM\n",
|
||||
" model = ChatVertexAI(temperature=0, model_name=\"gemini-pro-vision\", max_tokens=1024)\n",
|
||||
" model = ChatVertexAI(\n",
|
||||
" temperature=0, model_name=\"gemini-pro-vision\", max_output_tokens=1024\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" # RAG pipeline\n",
|
||||
" chain = (\n",
|
||||
@@ -602,7 +604,7 @@
|
||||
],
|
||||
"source": [
|
||||
"query = \"What are the EV / NTM and NTM rev growth for MongoDB, Cloudflare, and Datadog?\"\n",
|
||||
"docs = retriever_multi_vector_img.invoke(query, limit=1)\n",
|
||||
"docs = retriever_multi_vector_img.get_relevant_documents(query, limit=1)\n",
|
||||
"\n",
|
||||
"# We get 2 docs\n",
|
||||
"len(docs)"
|
||||
|
||||
@@ -535,9 +535,9 @@
|
||||
" print(f\"--Generated {len(all_clusters)} clusters--\")\n",
|
||||
"\n",
|
||||
" # Summarization\n",
|
||||
" template = \"\"\"Here is a sub-set of LangChain Expression Language doc. \n",
|
||||
" template = \"\"\"Here is a sub-set of LangChain Expression Langauge doc. \n",
|
||||
" \n",
|
||||
" LangChain Expression Language provides a way to compose chain in LangChain.\n",
|
||||
" LangChain Expression Langauge provides a way to compose chain in LangChain.\n",
|
||||
" \n",
|
||||
" Give a detailed summary of the documentation provided.\n",
|
||||
" \n",
|
||||
|
||||
@@ -47,7 +47,6 @@ Notebook | Description
|
||||
[press_releases.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/press_releases.ipynb) | Retrieve and query company press release data powered by [Kay.ai](https://kay.ai).
|
||||
[program_aided_language_model.i...](https://github.com/langchain-ai/langchain/tree/master/cookbook/program_aided_language_model.ipynb) | Implement program-aided language models as described in the provided research paper.
|
||||
[qa_citations.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/qa_citations.ipynb) | Different ways to get a model to cite its sources.
|
||||
[rag_upstage_layout_analysis_groundedness_check.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/rag_upstage_layout_analysis_groundedness_check.ipynb) | End-to-end RAG example using Upstage Layout Analysis and Groundedness Check.
|
||||
[retrieval_in_sql.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/retrieval_in_sql.ipynb) | Perform retrieval-augmented-generation (rag) on a PostgreSQL database using pgvector.
|
||||
[sales_agent_with_context.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/sales_agent_with_context.ipynb) | Implement a context-aware ai sales agent, salesgpt, that can have natural sales conversations, interact with other systems, and use a product knowledge base to discuss a company's offerings.
|
||||
[self_query_hotel_search.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/self_query_hotel_search.ipynb) | Build a hotel room search feature with self-querying retrieval, using a specific hotel recommendation dataset.
|
||||
@@ -57,4 +56,3 @@ Notebook | Description
|
||||
[two_agent_debate_tools.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/two_agent_debate_tools.ipynb) | Simulate multi-agent dialogues where the agents can utilize various tools.
|
||||
[two_player_dnd.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/two_player_dnd.ipynb) | Simulate a two-player dungeons & dragons game, where a dialogue simulator class is used to coordinate the dialogue between the protagonist and the dungeon master.
|
||||
[wikibase_agent.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/wikibase_agent.ipynb) | Create a simple wikibase agent that utilizes sparql generation, with testing done on http://wikidata.org.
|
||||
[oracleai_demo.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/oracleai_demo.ipynb) | This guide outlines how to utilize Oracle AI Vector Search alongside Langchain for an end-to-end RAG pipeline, providing step-by-step examples. The process includes loading documents from various sources using OracleDocLoader, summarizing them either within or outside the database with OracleSummary, and generating embeddings similarly through OracleEmbeddings. It also covers chunking documents according to specific requirements using Advanced Oracle Capabilities from OracleTextSplitter, and finally, storing and indexing these documents in a Vector Store for querying with OracleVS.
|
||||
@@ -75,7 +75,7 @@
|
||||
"\n",
|
||||
"Apply to the [`LLaMA2`](https://arxiv.org/pdf/2307.09288.pdf) paper. \n",
|
||||
"\n",
|
||||
"We use the Unstructured [`partition_pdf`](https://unstructured-io.github.io/unstructured/core/partition.html#partition-pdf), which segments a PDF document by using a layout model. \n",
|
||||
"We use the Unstructured [`partition_pdf`](https://unstructured-io.github.io/unstructured/bricks/partition.html#partition-pdf), which segments a PDF document by using a layout model. \n",
|
||||
"\n",
|
||||
"This layout model makes it possible to extract elements, such as tables, from pdfs. \n",
|
||||
"\n",
|
||||
|
||||
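For reference, a minimal call to Unstructured's `partition_pdf` typically looks like the sketch below. The file path is a placeholder and the flags shown are only a subset of the available options; check the Unstructured documentation for the full set.

```python
# Sketch of extracting elements (including tables) from a PDF with Unstructured.
# Assumes the `unstructured[all-docs]` extras are installed; the path is a placeholder.
from unstructured.partition.pdf import partition_pdf

elements = partition_pdf(
    filename="llama2.pdf",          # placeholder path to the paper
    infer_table_structure=True,     # ask the layout model to parse tables
)
tables = [el for el in elements if el.category == "Table"]
print(f"Found {len(tables)} tables out of {len(elements)} elements")
```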
@@ -562,7 +562,9 @@
|
||||
],
|
||||
"source": [
|
||||
"# We can retrieve this table\n",
|
||||
"retriever.invoke(\"What are results for LLaMA across across domains / subjects?\")[1]"
|
||||
"retriever.get_relevant_documents(\n",
|
||||
" \"What are results for LLaMA across across domains / subjects?\"\n",
|
||||
")[1]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -612,7 +614,9 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"retriever.invoke(\"Images / figures with playful and creative examples\")[1]"
|
||||
"retriever.get_relevant_documents(\"Images / figures with playful and creative examples\")[\n",
|
||||
" 1\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -191,15 +191,15 @@
|
||||
"source": [
|
||||
"## Multi-vector retriever\n",
|
||||
"\n",
|
||||
"Use [multi-vector-retriever](/docs/modules/data_connection/retrievers/multi_vector#summary).\n",
|
||||
"Use [multi-vector-retriever](https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector#summary).\n",
|
||||
"\n",
|
||||
"Summaries are used to retrieve raw tables and / or raw chunks of text.\n",
|
||||
"\n",
|
||||
"### Text and Table summaries\n",
|
||||
"\n",
|
||||
"Here, we use Ollama to run LLaMA2 locally. \n",
|
||||
"Here, we use ollama.ai to run LLaMA2 locally. \n",
|
||||
"\n",
|
||||
"See details on installation [here](/docs/guides/development/local_llms)."
|
||||
"See details on installation [here](https://python.langchain.com/docs/guides/local_llms)."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -501,7 +501,9 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"retriever.invoke(\"Images / figures with playful and creative examples\")[0]"
|
||||
"retriever.get_relevant_documents(\"Images / figures with playful and creative examples\")[\n",
|
||||
" 0\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -342,7 +342,7 @@
|
||||
"# Testing on retrieval\n",
|
||||
"query = \"What percentage of CPI is dedicated to Housing, and how does it compare to the combined percentage of Medical Care, Apparel, and Other Goods and Services?\"\n",
|
||||
"suffix_for_images = \" Include any pie charts, graphs, or tables.\"\n",
|
||||
"docs = retriever_multi_vector_img.invoke(query + suffix_for_images)"
|
||||
"docs = retriever_multi_vector_img.get_relevant_documents(query + suffix_for_images)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -532,8 +532,8 @@
|
||||
"def is_image_data(b64data):\n",
|
||||
" \"\"\"Check if the base64 data is an image by looking at the start of the data.\"\"\"\n",
|
||||
" image_signatures = {\n",
|
||||
" b\"\\xff\\xd8\\xff\": \"jpg\",\n",
|
||||
" b\"\\x89\\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\": \"png\",\n",
|
||||
" b\"\\xFF\\xD8\\xFF\": \"jpg\",\n",
|
||||
" b\"\\x89\\x50\\x4E\\x47\\x0D\\x0A\\x1A\\x0A\": \"png\",\n",
|
||||
" b\"\\x47\\x49\\x46\\x38\": \"gif\",\n",
|
||||
" b\"\\x52\\x49\\x46\\x46\": \"webp\",\n",
|
||||
" }\n",
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -40,13 +40,11 @@
|
||||
"import nest_asyncio\n",
|
||||
"import pandas as pd\n",
|
||||
"from langchain.docstore.document import Document\n",
|
||||
"from langchain_experimental.agents.agent_toolkits.pandas.base import (\n",
|
||||
" create_pandas_dataframe_agent,\n",
|
||||
")\n",
|
||||
"from langchain_community.agent_toolkits.pandas.base import create_pandas_dataframe_agent\n",
|
||||
"from langchain_experimental.autonomous_agents import AutoGPT\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"# Needed since jupyter runs an async eventloop\n",
|
||||
"# Needed synce jupyter runs an async eventloop\n",
|
||||
"nest_asyncio.apply()"
|
||||
]
|
||||
},
|
||||
@@ -59,7 +57,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = ChatOpenAI(model=\"gpt-4\", temperature=1.0)"
|
||||
"llm = ChatOpenAI(model_name=\"gpt-4\", temperature=1.0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -90,7 +90,7 @@
|
||||
" ) -> AIMessage:\n",
|
||||
" messages = self.update_messages(input_message)\n",
|
||||
"\n",
|
||||
" output_message = self.model.invoke(messages)\n",
|
||||
" output_message = self.model(messages)\n",
|
||||
" self.update_messages(output_message)\n",
|
||||
"\n",
|
||||
" return output_message"
|
||||
|
||||
@@ -933,7 +933,7 @@
|
||||
"**Answer**: The LangChain class includes various types of retrievers such as:\n",
|
||||
"\n",
|
||||
"- ArxivRetriever\n",
|
||||
"- AzureAISearchRetriever\n",
|
||||
"- AzureCognitiveSearchRetriever\n",
|
||||
"- BM25Retriever\n",
|
||||
"- ChaindeskRetriever\n",
|
||||
"- ChatGPTPluginRetriever\n",
|
||||
@@ -993,7 +993,7 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'LangChain possesses a variety of retrievers including:\\n\\n1. ArxivRetriever\\n2. AzureAISearchRetriever\\n3. BM25Retriever\\n4. ChaindeskRetriever\\n5. ChatGPTPluginRetriever\\n6. ContextualCompressionRetriever\\n7. DocArrayRetriever\\n8. ElasticSearchBM25Retriever\\n9. EnsembleRetriever\\n10. GoogleVertexAISearchRetriever\\n11. AmazonKendraRetriever\\n12. KNNRetriever\\n13. LlamaIndexGraphRetriever\\n14. LlamaIndexRetriever\\n15. MergerRetriever\\n16. MetalRetriever\\n17. MilvusRetriever\\n18. MultiQueryRetriever\\n19. ParentDocumentRetriever\\n20. PineconeHybridSearchRetriever\\n21. PubMedRetriever\\n22. RePhraseQueryRetriever\\n23. RemoteLangChainRetriever\\n24. SelfQueryRetriever\\n25. SVMRetriever\\n26. TFIDFRetriever\\n27. TimeWeightedVectorStoreRetriever\\n28. VespaRetriever\\n29. WeaviateHybridSearchRetriever\\n30. WebResearchRetriever\\n31. WikipediaRetriever\\n32. ZepRetriever\\n33. ZillizRetriever\\n\\nIt also includes self query translators like:\\n\\n1. ChromaTranslator\\n2. DeepLakeTranslator\\n3. MyScaleTranslator\\n4. PineconeTranslator\\n5. QdrantTranslator\\n6. WeaviateTranslator\\n\\nAnd remote retrievers like:\\n\\n1. RemoteLangChainRetriever'}"
|
||||
"{'question': 'LangChain possesses a variety of retrievers including:\\n\\n1. ArxivRetriever\\n2. AzureCognitiveSearchRetriever\\n3. BM25Retriever\\n4. ChaindeskRetriever\\n5. ChatGPTPluginRetriever\\n6. ContextualCompressionRetriever\\n7. DocArrayRetriever\\n8. ElasticSearchBM25Retriever\\n9. EnsembleRetriever\\n10. GoogleVertexAISearchRetriever\\n11. AmazonKendraRetriever\\n12. KNNRetriever\\n13. LlamaIndexGraphRetriever\\n14. LlamaIndexRetriever\\n15. MergerRetriever\\n16. MetalRetriever\\n17. MilvusRetriever\\n18. MultiQueryRetriever\\n19. ParentDocumentRetriever\\n20. PineconeHybridSearchRetriever\\n21. PubMedRetriever\\n22. RePhraseQueryRetriever\\n23. RemoteLangChainRetriever\\n24. SelfQueryRetriever\\n25. SVMRetriever\\n26. TFIDFRetriever\\n27. TimeWeightedVectorStoreRetriever\\n28. VespaRetriever\\n29. WeaviateHybridSearchRetriever\\n30. WebResearchRetriever\\n31. WikipediaRetriever\\n32. ZepRetriever\\n33. ZillizRetriever\\n\\nIt also includes self query translators like:\\n\\n1. ChromaTranslator\\n2. DeepLakeTranslator\\n3. MyScaleTranslator\\n4. PineconeTranslator\\n5. QdrantTranslator\\n6. WeaviateTranslator\\n\\nAnd remote retrievers like:\\n\\n1. RemoteLangChainRetriever'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 31,
|
||||
@@ -1117,7 +1117,7 @@
|
||||
"The LangChain class includes various types of retrievers such as:\n",
|
||||
"\n",
|
||||
"- ArxivRetriever\n",
|
||||
"- AzureAISearchRetriever\n",
|
||||
"- AzureCognitiveSearchRetriever\n",
|
||||
"- BM25Retriever\n",
|
||||
"- ChaindeskRetriever\n",
|
||||
"- ChatGPTPluginRetriever\n",
|
||||
|
||||
@@ -1,557 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup Environment"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Python Modules"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Install the following Python modules:\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"pip install ipykernel python-dotenv cassio pandas langchain_openai langchain langchain-community langchainhub langchain_experimental openai-multi-tool-use-parallel-patch\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Load the `.env` File"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Connection is via `cassio` using `auto=True` parameter, and the notebook uses OpenAI. You should create a `.env` file accordingly.\n",
|
||||
"\n",
|
||||
"For Casssandra, set:\n",
|
||||
"```bash\n",
|
||||
"CASSANDRA_CONTACT_POINTS\n",
|
||||
"CASSANDRA_USERNAME\n",
|
||||
"CASSANDRA_PASSWORD\n",
|
||||
"CASSANDRA_KEYSPACE\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"For Astra, set:\n",
|
||||
"```bash\n",
|
||||
"ASTRA_DB_APPLICATION_TOKEN\n",
|
||||
"ASTRA_DB_DATABASE_ID\n",
|
||||
"ASTRA_DB_KEYSPACE\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"For example:\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"# Connection to Astra:\n",
|
||||
"ASTRA_DB_DATABASE_ID=a1b2c3d4-...\n",
|
||||
"ASTRA_DB_APPLICATION_TOKEN=AstraCS:...\n",
|
||||
"ASTRA_DB_KEYSPACE=notebooks\n",
|
||||
"\n",
|
||||
"# Also set \n",
|
||||
"OPENAI_API_KEY=sk-....\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"(You may also modify the below code to directly connect with `cassio`.)"
|
||||
]
|
||||
},
|
||||
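For example, instead of relying on `auto=True`, you could initialize `cassio` explicitly. A rough sketch for the Astra case is below; the parameter names follow cassio's documented init options, so double-check them against the cassio docs for your setup.

```python
# Sketch of connecting explicitly instead of relying on auto=True.
# Values come from the same environment variables described above.
import os

import cassio

cassio.init(
    token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
    database_id=os.environ["ASTRA_DB_DATABASE_ID"],
    keyspace=os.environ.get("ASTRA_DB_KEYSPACE"),
)
```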
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dotenv import load_dotenv\n",
|
||||
"\n",
|
||||
"load_dotenv(override=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Connect to Cassandra"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"import cassio\n",
|
||||
"\n",
|
||||
"cassio.init(auto=True)\n",
|
||||
"session = cassio.config.resolve_session()\n",
|
||||
"if not session:\n",
|
||||
" raise Exception(\n",
|
||||
" \"Check environment configuration or manually configure cassio connection parameters\"\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"keyspace = os.environ.get(\n",
|
||||
" \"ASTRA_DB_KEYSPACE\", os.environ.get(\"CASSANDRA_KEYSPACE\", None)\n",
|
||||
")\n",
|
||||
"if not keyspace:\n",
|
||||
" raise ValueError(\"a KEYSPACE environment variable must be set\")\n",
|
||||
"\n",
|
||||
"session.set_keyspace(keyspace)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup Database"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This needs to be done one time only!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Download Data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The dataset used is from Kaggle, the [Environmental Sensor Telemetry Data](https://www.kaggle.com/datasets/garystafford/environmental-sensor-data-132k?select=iot_telemetry_data.csv). The next cell will download and unzip the data into a Pandas dataframe. The following cell is instructions to download manually. \n",
|
||||
"\n",
|
||||
"The net result of this section is you should have a Pandas dataframe variable `df`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Download Automatically"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from io import BytesIO\n",
|
||||
"from zipfile import ZipFile\n",
|
||||
"\n",
|
||||
"import pandas as pd\n",
|
||||
"import requests\n",
|
||||
"\n",
|
||||
"datasetURL = \"https://storage.googleapis.com/kaggle-data-sets/788816/1355729/bundle/archive.zip?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=gcp-kaggle-com%40kaggle-161607.iam.gserviceaccount.com%2F20240404%2Fauto%2Fstorage%2Fgoog4_request&X-Goog-Date=20240404T115828Z&X-Goog-Expires=259200&X-Goog-SignedHeaders=host&X-Goog-Signature=2849f003b100eb9dcda8dd8535990f51244292f67e4f5fad36f14aa67f2d4297672d8fe6ff5a39f03a29cda051e33e95d36daab5892b8874dcd5a60228df0361fa26bae491dd4371f02dd20306b583a44ba85a4474376188b1f84765147d3b4f05c57345e5de883c2c29653cce1f3755cd8e645c5e952f4fb1c8a735b22f0c811f97f7bce8d0235d0d3731ca8ab4629ff381f3bae9e35fc1b181c1e69a9c7913a5e42d9d52d53e5f716467205af9c8a3cc6746fc5352e8fbc47cd7d18543626bd67996d18c2045c1e475fc136df83df352fa747f1a3bb73e6ba3985840792ec1de407c15836640ec96db111b173bf16115037d53fdfbfd8ac44145d7f9a546aa\"\n",
|
||||
"\n",
|
||||
"response = requests.get(datasetURL)\n",
|
||||
"if response.status_code == 200:\n",
|
||||
" zip_file = ZipFile(BytesIO(response.content))\n",
|
||||
" csv_file_name = zip_file.namelist()[0]\n",
|
||||
"else:\n",
|
||||
" print(\"Failed to download the file\")\n",
|
||||
"\n",
|
||||
"with zip_file.open(csv_file_name) as csv_file:\n",
|
||||
" df = pd.read_csv(csv_file)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Download Manually"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can download the `.zip` file and unpack the `.csv` contained within. Comment in the next line, and adjust the path to this `.csv` file appropriately."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# df = pd.read_csv(\"/path/to/iot_telemetry_data.csv\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Load Data into Cassandra"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This section assumes the existence of a dataframe `df`, the following cell validates its structure. The Download section above creates this object."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"assert df is not None, \"Dataframe 'df' must be set\"\n",
|
||||
"expected_columns = [\n",
|
||||
" \"ts\",\n",
|
||||
" \"device\",\n",
|
||||
" \"co\",\n",
|
||||
" \"humidity\",\n",
|
||||
" \"light\",\n",
|
||||
" \"lpg\",\n",
|
||||
" \"motion\",\n",
|
||||
" \"smoke\",\n",
|
||||
" \"temp\",\n",
|
||||
"]\n",
|
||||
"assert all(\n",
|
||||
" [column in df.columns for column in expected_columns]\n",
|
||||
"), \"DataFrame does not have the expected columns\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Create and load tables:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from datetime import UTC, datetime\n",
|
||||
"\n",
|
||||
"from cassandra.query import BatchStatement\n",
|
||||
"\n",
|
||||
"# Create sensors table\n",
|
||||
"table_query = \"\"\"\n",
|
||||
"CREATE TABLE IF NOT EXISTS iot_sensors (\n",
|
||||
" device text,\n",
|
||||
" conditions text,\n",
|
||||
" room text,\n",
|
||||
" PRIMARY KEY (device)\n",
|
||||
")\n",
|
||||
"WITH COMMENT = 'Environmental IoT room sensor metadata.';\n",
|
||||
"\"\"\"\n",
|
||||
"session.execute(table_query)\n",
|
||||
"\n",
|
||||
"pstmt = session.prepare(\n",
|
||||
" \"\"\"\n",
|
||||
"INSERT INTO iot_sensors (device, conditions, room)\n",
|
||||
"VALUES (?, ?, ?)\n",
|
||||
"\"\"\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"devices = [\n",
|
||||
" (\"00:0f:00:70:91:0a\", \"stable conditions, cooler and more humid\", \"room 1\"),\n",
|
||||
" (\"1c:bf:ce:15:ec:4d\", \"highly variable temperature and humidity\", \"room 2\"),\n",
|
||||
" (\"b8:27:eb:bf:9d:51\", \"stable conditions, warmer and dryer\", \"room 3\"),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"for device, conditions, room in devices:\n",
|
||||
" session.execute(pstmt, (device, conditions, room))\n",
|
||||
"\n",
|
||||
"print(\"Sensors inserted successfully.\")\n",
|
||||
"\n",
|
||||
"# Create data table\n",
|
||||
"table_query = \"\"\"\n",
|
||||
"CREATE TABLE IF NOT EXISTS iot_data (\n",
|
||||
" day text,\n",
|
||||
" device text,\n",
|
||||
" ts timestamp,\n",
|
||||
" co double,\n",
|
||||
" humidity double,\n",
|
||||
" light boolean,\n",
|
||||
" lpg double,\n",
|
||||
" motion boolean,\n",
|
||||
" smoke double,\n",
|
||||
" temp double,\n",
|
||||
" PRIMARY KEY ((day, device), ts)\n",
|
||||
")\n",
|
||||
"WITH COMMENT = 'Data from environmental IoT room sensors. Columns include device identifier, timestamp (ts) of the data collection, carbon monoxide level (co), relative humidity, light presence, LPG concentration, motion detection, smoke concentration, and temperature (temp). Data is partitioned by day and device.';\n",
|
||||
"\"\"\"\n",
|
||||
"session.execute(table_query)\n",
|
||||
"\n",
|
||||
"pstmt = session.prepare(\n",
|
||||
" \"\"\"\n",
|
||||
"INSERT INTO iot_data (day, device, ts, co, humidity, light, lpg, motion, smoke, temp)\n",
|
||||
"VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n",
|
||||
"\"\"\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def insert_data_batch(name, group):\n",
|
||||
" batch = BatchStatement()\n",
|
||||
" day, device = name\n",
|
||||
" print(f\"Inserting batch for day: {day}, device: {device}\")\n",
|
||||
"\n",
|
||||
" for _, row in group.iterrows():\n",
|
||||
" timestamp = datetime.fromtimestamp(row[\"ts\"], UTC)\n",
|
||||
" batch.add(\n",
|
||||
" pstmt,\n",
|
||||
" (\n",
|
||||
" day,\n",
|
||||
" row[\"device\"],\n",
|
||||
" timestamp,\n",
|
||||
" row[\"co\"],\n",
|
||||
" row[\"humidity\"],\n",
|
||||
" row[\"light\"],\n",
|
||||
" row[\"lpg\"],\n",
|
||||
" row[\"motion\"],\n",
|
||||
" row[\"smoke\"],\n",
|
||||
" row[\"temp\"],\n",
|
||||
" ),\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" session.execute(batch)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Convert columns to appropriate types\n",
|
||||
"df[\"light\"] = df[\"light\"] == \"true\"\n",
|
||||
"df[\"motion\"] = df[\"motion\"] == \"true\"\n",
|
||||
"df[\"ts\"] = df[\"ts\"].astype(float)\n",
|
||||
"df[\"day\"] = df[\"ts\"].apply(\n",
|
||||
" lambda x: datetime.fromtimestamp(x, UTC).strftime(\"%Y-%m-%d\")\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"grouped_df = df.groupby([\"day\", \"device\"])\n",
|
||||
"\n",
|
||||
"for name, group in grouped_df:\n",
|
||||
" insert_data_batch(name, group)\n",
|
||||
"\n",
|
||||
"print(\"Data load complete\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(session.keyspace)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load the Tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Python `import` statements for the demo:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import AgentExecutor, create_openai_tools_agent\n",
|
||||
"from langchain_community.agent_toolkits.cassandra_database.toolkit import (\n",
|
||||
" CassandraDatabaseToolkit,\n",
|
||||
")\n",
|
||||
"from langchain_community.tools.cassandra_database.prompt import QUERY_PATH_PROMPT\n",
|
||||
"from langchain_community.tools.cassandra_database.tool import (\n",
|
||||
" GetSchemaCassandraDatabaseTool,\n",
|
||||
" GetTableDataCassandraDatabaseTool,\n",
|
||||
" QueryCassandraDatabaseTool,\n",
|
||||
")\n",
|
||||
"from langchain_community.utilities.cassandra_database import CassandraDatabase\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The `CassandraDatabase` object is loaded from `cassio`, though it does accept a `Session`-type parameter as an alternative."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Create a CassandraDatabase instance\n",
|
||||
"db = CassandraDatabase(include_tables=[\"iot_sensors\", \"iot_data\"])\n",
|
||||
"\n",
|
||||
"# Create the Cassandra Database tools\n",
|
||||
"query_tool = QueryCassandraDatabaseTool(db=db)\n",
|
||||
"schema_tool = GetSchemaCassandraDatabaseTool(db=db)\n",
|
||||
"select_data_tool = GetTableDataCassandraDatabaseTool(db=db)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The tools can be invoked directly:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Test the tools\n",
|
||||
"print(\"Executing a CQL query:\")\n",
|
||||
"query = \"SELECT * FROM iot_sensors LIMIT 5;\"\n",
|
||||
"result = query_tool.run({\"query\": query})\n",
|
||||
"print(result)\n",
|
||||
"\n",
|
||||
"print(\"\\nGetting the schema for a keyspace:\")\n",
|
||||
"schema = schema_tool.run({\"keyspace\": keyspace})\n",
|
||||
"print(schema)\n",
|
||||
"\n",
|
||||
"print(\"\\nGetting data from a table:\")\n",
|
||||
"table = \"iot_data\"\n",
|
||||
"predicate = \"day = '2020-07-14' and device = 'b8:27:eb:bf:9d:51'\"\n",
|
||||
"data = select_data_tool.run(\n",
|
||||
" {\"keyspace\": keyspace, \"table\": table, \"predicate\": predicate, \"limit\": 5}\n",
|
||||
")\n",
|
||||
"print(data)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Agent Configuration"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import Tool\n",
|
||||
"from langchain_experimental.utilities import PythonREPL\n",
|
||||
"\n",
|
||||
"python_repl = PythonREPL()\n",
|
||||
"\n",
|
||||
"repl_tool = Tool(\n",
|
||||
" name=\"python_repl\",\n",
|
||||
" description=\"A Python shell. Use this to execute python commands. Input should be a valid python command. If you want to see the output of a value, you should print it out with `print(...)`.\",\n",
|
||||
" func=python_repl.run,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain import hub\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(temperature=0, model=\"gpt-4-1106-preview\")\n",
|
||||
"toolkit = CassandraDatabaseToolkit(db=db)\n",
|
||||
"\n",
|
||||
"# context = toolkit.get_context()\n",
|
||||
"# tools = toolkit.get_tools()\n",
|
||||
"tools = [schema_tool, select_data_tool, repl_tool]\n",
|
||||
"\n",
|
||||
"input = (\n",
|
||||
" QUERY_PATH_PROMPT\n",
|
||||
" + f\"\"\"\n",
|
||||
"\n",
|
||||
"Here is your task: In the {keyspace} keyspace, find the total number of times the temperature of each device has exceeded 23 degrees on July 14, 2020.\n",
|
||||
" Create a summary report including the name of the room. Use Pandas if helpful.\n",
|
||||
"\"\"\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"prompt = hub.pull(\"hwchase17/openai-tools-agent\")\n",
|
||||
"\n",
|
||||
"# messages = [\n",
|
||||
"# HumanMessagePromptTemplate.from_template(input),\n",
|
||||
"# AIMessage(content=QUERY_PATH_PROMPT),\n",
|
||||
"# MessagesPlaceholder(variable_name=\"agent_scratchpad\"),\n",
|
||||
"# ]\n",
|
||||
"\n",
|
||||
"# prompt = ChatPromptTemplate.from_messages(messages)\n",
|
||||
"# print(prompt)\n",
|
||||
"\n",
|
||||
"# Choose the LLM that will drive the agent\n",
|
||||
"# Only certain models support this\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-1106\", temperature=0)\n",
|
||||
"\n",
|
||||
"# Construct the OpenAI Tools agent\n",
|
||||
"agent = create_openai_tools_agent(llm, tools, prompt)\n",
|
||||
"\n",
|
||||
"print(\"Available tools:\")\n",
|
||||
"for tool in tools:\n",
|
||||
" print(\"\\t\" + tool.name + \" - \" + tool.description + \" - \" + str(tool))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)\n",
|
||||
"\n",
|
||||
"response = agent_executor.invoke({\"input\": input})\n",
|
||||
"\n",
|
||||
"print(response[\"output\"])"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -169,7 +169,7 @@
|
||||
"\n",
|
||||
"def get_tools(query):\n",
|
||||
" # Get documents, which contain the Plugins to use\n",
|
||||
" docs = retriever.invoke(query)\n",
|
||||
" docs = retriever.get_relevant_documents(query)\n",
|
||||
" # Get the toolkits, one for each plugin\n",
|
||||
" tool_kits = [toolkits_dict[d.metadata[\"plugin_name\"]] for d in docs]\n",
|
||||
" # Get the tools: a separate NLAChain for each endpoint\n",
|
||||
|
||||
@@ -193,7 +193,7 @@
|
||||
"\n",
|
||||
"def get_tools(query):\n",
|
||||
" # Get documents, which contain the Plugins to use\n",
|
||||
" docs = retriever.invoke(query)\n",
|
||||
" docs = retriever.get_relevant_documents(query)\n",
|
||||
" # Get the toolkits, one for each plugin\n",
|
||||
" tool_kits = [toolkits_dict[d.metadata[\"plugin_name\"]] for d in docs]\n",
|
||||
" # Get the tools: a separate NLAChain for each endpoint\n",
|
||||
|
||||
@@ -142,7 +142,7 @@
|
||||
"\n",
|
||||
"\n",
|
||||
"def get_tools(query):\n",
|
||||
" docs = retriever.invoke(query)\n",
|
||||
" docs = retriever.get_relevant_documents(query)\n",
|
||||
" return [ALL_TOOLS[d.metadata[\"index\"]] for d in docs]"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -84,7 +84,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = ChatOpenAI(model=\"gpt-4\", temperature=0)\n",
|
||||
"llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)\n",
|
||||
"chain = ElasticsearchDatabaseChain.from_llm(llm=llm, database=db, verbose=True)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -100,7 +100,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.invoke(\"whats 2 + 2\")"
|
||||
"agent.run(\"whats 2 + 2\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -362,7 +362,7 @@
|
||||
],
|
||||
"source": [
|
||||
"llm = OpenAI()\n",
|
||||
"llm.invoke(query)"
|
||||
"llm(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -108,7 +108,7 @@
|
||||
" return obs_message\n",
|
||||
"\n",
|
||||
" def _act(self):\n",
|
||||
" act_message = self.model.invoke(self.message_history)\n",
|
||||
" act_message = self.model(self.message_history)\n",
|
||||
" self.message_history.append(act_message)\n",
|
||||
" action = int(self.action_parser.parse(act_message.content)[\"action\"])\n",
|
||||
" return action\n",
|
||||
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -45,7 +45,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_symbolic_math.invoke(\"What is the derivative of sin(x)*exp(x) with respect to x?\")"
|
||||
"llm_symbolic_math.run(\"What is the derivative of sin(x)*exp(x) with respect to x?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -65,7 +65,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_symbolic_math.invoke(\n",
|
||||
"llm_symbolic_math.run(\n",
|
||||
" \"What is the integral of exp(x)*sin(x) + exp(x)*cos(x) with respect to x?\"\n",
|
||||
")"
|
||||
]
|
||||
@@ -94,7 +94,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_symbolic_math.invoke('Solve the differential equation y\" - y = e^t')"
|
||||
"llm_symbolic_math.run('Solve the differential equation y\" - y = e^t')"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -114,7 +114,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_symbolic_math.invoke(\"What are the solutions to this equation y^3 + 1/3y?\")"
|
||||
"llm_symbolic_math.run(\"What are the solutions to this equation y^3 + 1/3y?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -134,7 +134,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_symbolic_math.invoke(\"x = y + 5, y = z - 3, z = x * y. Solve for x, y, z\")"
|
||||
"llm_symbolic_math.run(\"x = y + 5, y = z - 3, z = x * y. Solve for x, y, z\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
@@ -1,818 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "70b333e6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"[](https://www.mongodb.com/developer/products/atlas/advanced-rag-langchain-mongodb/)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d84a72ea",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Adding Semantic Caching and Memory to your RAG Application using MongoDB and LangChain\n",
|
||||
"\n",
|
||||
"In this notebook, we will see how to use the new MongoDBCache and MongoDBChatMessageHistory in your RAG application.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "65527202",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Step 1: Install required libraries\n",
|
||||
"\n",
|
||||
"- **datasets**: Python library to get access to datasets available on Hugging Face Hub\n",
|
||||
"\n",
|
||||
"- **langchain**: Python toolkit for LangChain\n",
|
||||
"\n",
|
||||
"- **langchain-mongodb**: Python package to use MongoDB as a vector store, semantic cache, chat history store etc. in LangChain\n",
|
||||
"\n",
|
||||
"- **langchain-openai**: Python package to use OpenAI models with LangChain\n",
|
||||
"\n",
|
||||
"- **pymongo**: Python toolkit for MongoDB\n",
|
||||
"\n",
|
||||
"- **pandas**: Python library for data analysis, exploration, and manipulation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "cbc22fa4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install -qU datasets langchain langchain-mongodb langchain-openai pymongo pandas"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "39c41e87",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Step 2: Setup pre-requisites\n",
|
||||
"\n",
|
||||
"* Set the MongoDB connection string. Follow the steps [here](https://www.mongodb.com/docs/manual/reference/connection-string/) to get the connection string from the Atlas UI.\n",
|
||||
"\n",
|
||||
"* Set the OpenAI API key. Steps to obtain an API key as [here](https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "b56412ae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "16a20d7a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Enter your MongoDB connection string:········\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"MONGODB_URI = getpass.getpass(\"Enter your MongoDB connection string:\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "978682d4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Enter your OpenAI API key:········\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"OPENAI_API_KEY = getpass.getpass(\"Enter your OpenAI API key:\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "606081c5",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"········\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Optional-- If you want to enable Langsmith -- good for debugging\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
|
||||
"os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f6b8302c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Step 3: Download the dataset\n",
|
||||
"\n",
|
||||
"We will be using MongoDB's [embedded_movies](https://huggingface.co/datasets/MongoDB/embedded_movies) dataset"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "1a3433a6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import pandas as pd\n",
|
||||
"from datasets import load_dataset"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "aee5311b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Ensure you have an HF_TOKEN in your development enviornment:\n",
|
||||
"# access tokens can be created or copied from the Hugging Face platform (https://huggingface.co/docs/hub/en/security-tokens)\n",
|
||||
"\n",
|
||||
"# Load MongoDB's embedded_movies dataset from Hugging Face\n",
|
||||
"# https://huggingface.co/datasets/MongoDB/airbnb_embeddings\n",
|
||||
"\n",
|
||||
"data = load_dataset(\"MongoDB/embedded_movies\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "1d630a26",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"df = pd.DataFrame(data[\"train\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a1f94f43",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Step 4: Data analysis\n",
|
||||
"\n",
|
||||
"Make sure length of the dataset is what we expect, drop Nones etc."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "b276df71",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/html": [
|
||||
"<div>\n",
|
||||
"<style scoped>\n",
|
||||
" .dataframe tbody tr th:only-of-type {\n",
|
||||
" vertical-align: middle;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .dataframe tbody tr th {\n",
|
||||
" vertical-align: top;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .dataframe thead th {\n",
|
||||
" text-align: right;\n",
|
||||
" }\n",
|
||||
"</style>\n",
|
||||
"<table border=\"1\" class=\"dataframe\">\n",
|
||||
" <thead>\n",
|
||||
" <tr style=\"text-align: right;\">\n",
|
||||
" <th></th>\n",
|
||||
" <th>fullplot</th>\n",
|
||||
" <th>type</th>\n",
|
||||
" <th>plot_embedding</th>\n",
|
||||
" <th>num_mflix_comments</th>\n",
|
||||
" <th>runtime</th>\n",
|
||||
" <th>writers</th>\n",
|
||||
" <th>imdb</th>\n",
|
||||
" <th>countries</th>\n",
|
||||
" <th>rated</th>\n",
|
||||
" <th>plot</th>\n",
|
||||
" <th>title</th>\n",
|
||||
" <th>languages</th>\n",
|
||||
" <th>metacritic</th>\n",
|
||||
" <th>directors</th>\n",
|
||||
" <th>awards</th>\n",
|
||||
" <th>genres</th>\n",
|
||||
" <th>poster</th>\n",
|
||||
" <th>cast</th>\n",
|
||||
" </tr>\n",
|
||||
" </thead>\n",
|
||||
" <tbody>\n",
|
||||
" <tr>\n",
|
||||
" <th>0</th>\n",
|
||||
" <td>Young Pauline is left a lot of money when her ...</td>\n",
|
||||
" <td>movie</td>\n",
|
||||
" <td>[0.00072939653, -0.026834568, 0.013515796, -0....</td>\n",
|
||||
" <td>0</td>\n",
|
||||
" <td>199.0</td>\n",
|
||||
" <td>[Charles W. Goddard (screenplay), Basil Dickey...</td>\n",
|
||||
" <td>{'id': 4465, 'rating': 7.6, 'votes': 744}</td>\n",
|
||||
" <td>[USA]</td>\n",
|
||||
" <td>None</td>\n",
|
||||
" <td>Young Pauline is left a lot of money when her ...</td>\n",
|
||||
" <td>The Perils of Pauline</td>\n",
|
||||
" <td>[English]</td>\n",
|
||||
" <td>NaN</td>\n",
|
||||
" <td>[Louis J. Gasnier, Donald MacKenzie]</td>\n",
|
||||
" <td>{'nominations': 0, 'text': '1 win.', 'wins': 1}</td>\n",
|
||||
" <td>[Action]</td>\n",
|
||||
" <td>https://m.media-amazon.com/images/M/MV5BMzgxOD...</td>\n",
|
||||
" <td>[Pearl White, Crane Wilbur, Paul Panzer, Edwar...</td>\n",
|
||||
" </tr>\n",
|
||||
" </tbody>\n",
|
||||
"</table>\n",
|
||||
"</div>"
|
||||
],
|
||||
"text/plain": [
|
||||
" fullplot type \\\n",
|
||||
"0 Young Pauline is left a lot of money when her ... movie \n",
|
||||
"\n",
|
||||
" plot_embedding num_mflix_comments \\\n",
|
||||
"0 [0.00072939653, -0.026834568, 0.013515796, -0.... 0 \n",
|
||||
"\n",
|
||||
" runtime writers \\\n",
|
||||
"0 199.0 [Charles W. Goddard (screenplay), Basil Dickey... \n",
|
||||
"\n",
|
||||
" imdb countries rated \\\n",
|
||||
"0 {'id': 4465, 'rating': 7.6, 'votes': 744} [USA] None \n",
|
||||
"\n",
|
||||
" plot title \\\n",
|
||||
"0 Young Pauline is left a lot of money when her ... The Perils of Pauline \n",
|
||||
"\n",
|
||||
" languages metacritic directors \\\n",
|
||||
"0 [English] NaN [Louis J. Gasnier, Donald MacKenzie] \n",
|
||||
"\n",
|
||||
" awards genres \\\n",
|
||||
"0 {'nominations': 0, 'text': '1 win.', 'wins': 1} [Action] \n",
|
||||
"\n",
|
||||
" poster \\\n",
|
||||
"0 https://m.media-amazon.com/images/M/MV5BMzgxOD... \n",
|
||||
"\n",
|
||||
" cast \n",
|
||||
"0 [Pearl White, Crane Wilbur, Paul Panzer, Edwar... "
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Previewing the contents of the data\n",
|
||||
"df.head(1)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "22ab375d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Only keep records where the fullplot field is not null\n",
|
||||
"df = df[df[\"fullplot\"].notna()]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "fceed99a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Renaming the embedding field to \"embedding\" -- required by LangChain\n",
|
||||
"df.rename(columns={\"plot_embedding\": \"embedding\"}, inplace=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "aedec13a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Step 5: Create a simple RAG chain using MongoDB as the vector store"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "11d292f3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_mongodb import MongoDBAtlasVectorSearch\n",
|
||||
"from pymongo import MongoClient\n",
|
||||
"\n",
|
||||
"# Initialize MongoDB python client\n",
|
||||
"client = MongoClient(MONGODB_URI, appname=\"devrel.content.python\")\n",
|
||||
"\n",
|
||||
"DB_NAME = \"langchain_chatbot\"\n",
|
||||
"COLLECTION_NAME = \"data\"\n",
|
||||
"ATLAS_VECTOR_SEARCH_INDEX_NAME = \"vector_index\"\n",
|
||||
"collection = client[DB_NAME][COLLECTION_NAME]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "d8292d53",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"DeleteResult({'n': 1000, 'electionId': ObjectId('7fffffff00000000000000f6'), 'opTime': {'ts': Timestamp(1710523288, 1033), 't': 246}, 'ok': 1.0, '$clusterTime': {'clusterTime': Timestamp(1710523288, 1042), 'signature': {'hash': b\"i\\xa8\\xe9'\\x1ed\\xf2u\\xf3L\\xff\\xb1\\xf5\\xbfA\\x90\\xabJ\\x12\\x83\", 'keyId': 7299545392000008318}}, 'operationTime': Timestamp(1710523288, 1033)}, acknowledged=True)"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Delete any existing records in the collection\n",
|
||||
"collection.delete_many({})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "36c68914",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Data ingestion into MongoDB completed\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Data Ingestion\n",
|
||||
"records = df.to_dict(\"records\")\n",
|
||||
"collection.insert_many(records)\n",
|
||||
"\n",
|
||||
"print(\"Data ingestion into MongoDB completed\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "cbfca0b8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"# Using the text-embedding-ada-002 since that's what was used to create embeddings in the movies dataset\n",
|
||||
"embeddings = OpenAIEmbeddings(\n",
|
||||
" openai_api_key=OPENAI_API_KEY, model=\"text-embedding-ada-002\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "798e176c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Vector Store Creation\n",
|
||||
"vector_store = MongoDBAtlasVectorSearch.from_connection_string(\n",
|
||||
" connection_string=MONGODB_URI,\n",
|
||||
" namespace=DB_NAME + \".\" + COLLECTION_NAME,\n",
|
||||
" embedding=embeddings,\n",
|
||||
" index_name=ATLAS_VECTOR_SEARCH_INDEX_NAME,\n",
|
||||
" text_key=\"fullplot\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 49,
|
||||
"id": "c71cd087",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Using the MongoDB vector store as a retriever in a RAG chain\n",
|
||||
"retriever = vector_store.as_retriever(search_type=\"similarity\", search_kwargs={\"k\": 5})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 25,
|
||||
"id": "b6588cd3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"# Generate context using the retriever, and pass the user question through\n",
|
||||
"retrieve = {\n",
|
||||
" \"context\": retriever | (lambda docs: \"\\n\\n\".join([d.page_content for d in docs])),\n",
|
||||
" \"question\": RunnablePassthrough(),\n",
|
||||
"}\n",
|
||||
"template = \"\"\"Answer the question based only on the following context: \\\n",
|
||||
"{context}\n",
|
||||
"\n",
|
||||
"Question: {question}\n",
|
||||
"\"\"\"\n",
|
||||
"# Defining the chat prompt\n",
|
||||
"prompt = ChatPromptTemplate.from_template(template)\n",
|
||||
"# Defining the model to be used for chat completion\n",
|
||||
"model = ChatOpenAI(temperature=0, openai_api_key=OPENAI_API_KEY)\n",
|
||||
"# Parse output as a string\n",
|
||||
"parse_output = StrOutputParser()\n",
|
||||
"\n",
|
||||
"# Naive RAG chain\n",
|
||||
"naive_rag_chain = retrieve | prompt | model | parse_output"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"id": "aaae21f5",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Once a Thief'"
|
||||
]
|
||||
},
|
||||
"execution_count": 26,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"naive_rag_chain.invoke(\"What is the best movie to watch when sad?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "75f929ef",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Step 6: Create a RAG chain with chat history"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"id": "94e7bd4a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.prompts import MessagesPlaceholder\n",
|
||||
"from langchain_core.runnables.history import RunnableWithMessageHistory\n",
|
||||
"from langchain_mongodb.chat_message_histories import MongoDBChatMessageHistory"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 29,
|
||||
"id": "5bb30860",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def get_session_history(session_id: str) -> MongoDBChatMessageHistory:\n",
|
||||
" return MongoDBChatMessageHistory(\n",
|
||||
" MONGODB_URI, session_id, database_name=DB_NAME, collection_name=\"history\"\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 50,
|
||||
"id": "f51d0f35",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Given a follow-up question and history, create a standalone question\n",
|
||||
"standalone_system_prompt = \"\"\"\n",
|
||||
"Given a chat history and a follow-up question, rephrase the follow-up question to be a standalone question. \\\n",
|
||||
"Do NOT answer the question, just reformulate it if needed, otherwise return it as is. \\\n",
|
||||
"Only return the final standalone question. \\\n",
|
||||
"\"\"\"\n",
|
||||
"standalone_question_prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", standalone_system_prompt),\n",
|
||||
" MessagesPlaceholder(variable_name=\"history\"),\n",
|
||||
" (\"human\", \"{question}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"question_chain = standalone_question_prompt | model | parse_output"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 51,
|
||||
"id": "f3ef3354",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Generate context by passing output of the question_chain i.e. the standalone question to the retriever\n",
|
||||
"retriever_chain = RunnablePassthrough.assign(\n",
|
||||
" context=question_chain\n",
|
||||
" | retriever\n",
|
||||
" | (lambda docs: \"\\n\\n\".join([d.page_content for d in docs]))\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 55,
|
||||
"id": "5afb7345",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Create a prompt that includes the context, history and the follow-up question\n",
|
||||
"rag_system_prompt = \"\"\"Answer the question based only on the following context: \\\n",
|
||||
"{context}\n",
|
||||
"\"\"\"\n",
|
||||
"rag_prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", rag_system_prompt),\n",
|
||||
" MessagesPlaceholder(variable_name=\"history\"),\n",
|
||||
" (\"human\", \"{question}\"),\n",
|
||||
" ]\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 56,
|
||||
"id": "f95f47d0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# RAG chain\n",
|
||||
"rag_chain = retriever_chain | rag_prompt | model | parse_output"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 57,
|
||||
"id": "9618d395",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'The best movie to watch when feeling down could be \"Last Action Hero.\" It\\'s a fun and action-packed film that blends reality and fantasy, offering an escape from the real world and providing an entertaining distraction.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 57,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# RAG chain with history\n",
|
||||
"with_message_history = RunnableWithMessageHistory(\n",
|
||||
" rag_chain,\n",
|
||||
" get_session_history,\n",
|
||||
" input_messages_key=\"question\",\n",
|
||||
" history_messages_key=\"history\",\n",
|
||||
")\n",
|
||||
"with_message_history.invoke(\n",
|
||||
" {\"question\": \"What is the best movie to watch when sad?\"},\n",
|
||||
" {\"configurable\": {\"session_id\": \"1\"}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 58,
|
||||
"id": "6e3080d1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'I apologize for the confusion. Another movie that might lift your spirits when you\\'re feeling sad is \"Smilla\\'s Sense of Snow.\" It\\'s a mystery thriller that could engage your mind and distract you from your sadness with its intriguing plot and suspenseful storyline.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 58,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"with_message_history.invoke(\n",
|
||||
" {\n",
|
||||
" \"question\": \"Hmmm..I don't want to watch that one. Can you suggest something else?\"\n",
|
||||
" },\n",
|
||||
" {\"configurable\": {\"session_id\": \"1\"}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 59,
|
||||
"id": "daea2953",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'For a lighter movie option, you might enjoy \"Cousins.\" It\\'s a comedy film set in Barcelona with action and humor, offering a fun and entertaining escape from reality. The storyline is engaging and filled with comedic moments that could help lift your spirits.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 59,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"with_message_history.invoke(\n",
|
||||
" {\"question\": \"How about something more light?\"},\n",
|
||||
" {\"configurable\": {\"session_id\": \"1\"}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0de23a88",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Step 7: Get faster responses using Semantic Cache\n",
|
||||
"\n",
|
||||
"**NOTE:** Semantic cache only caches the input to the LLM. When using it in retrieval chains, remember that documents retrieved can change between runs resulting in cache misses for semantically similar queries."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 61,
|
||||
"id": "5d6b6741",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.globals import set_llm_cache\n",
|
||||
"from langchain_mongodb.cache import MongoDBAtlasSemanticCache\n",
|
||||
"\n",
|
||||
"set_llm_cache(\n",
|
||||
" MongoDBAtlasSemanticCache(\n",
|
||||
" connection_string=MONGODB_URI,\n",
|
||||
" embedding=embeddings,\n",
|
||||
" collection_name=\"semantic_cache\",\n",
|
||||
" database_name=DB_NAME,\n",
|
||||
" index_name=ATLAS_VECTOR_SEARCH_INDEX_NAME,\n",
|
||||
" wait_until_ready=True, # Optional, waits until the cache is ready to be used\n",
|
||||
" )\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 62,
|
||||
"id": "9825bc7b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CPU times: user 87.8 ms, sys: 670 µs, total: 88.5 ms\n",
|
||||
"Wall time: 1.24 s\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Once a Thief'"
|
||||
]
|
||||
},
|
||||
"execution_count": 62,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"naive_rag_chain.invoke(\"What is the best movie to watch when sad?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 63,
|
||||
"id": "a5e518cf",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CPU times: user 43.5 ms, sys: 4.16 ms, total: 47.7 ms\n",
|
||||
"Wall time: 255 ms\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Once a Thief'"
|
||||
]
|
||||
},
|
||||
"execution_count": 63,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"naive_rag_chain.invoke(\"What is the best movie to watch when sad?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 64,
|
||||
"id": "3d3d3ad3",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CPU times: user 115 ms, sys: 171 µs, total: 115 ms\n",
|
||||
"Wall time: 1.38 s\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'I would recommend watching \"Last Action Hero\" when sad, as it is a fun and action-packed film that can help lift your spirits.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 64,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"naive_rag_chain.invoke(\"Which movie do I watch when sad?\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "conda_pytorch_p310",
|
||||
"language": "python",
|
||||
"name": "conda_pytorch_p310"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -435,7 +435,7 @@
|
||||
" display(HTML(image_html))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"docs = retriever.invoke(\"Woman with children\", k=10)\n",
|
||||
"docs = retriever.get_relevant_documents(\"Woman with children\", k=10)\n",
|
||||
"for doc in docs:\n",
|
||||
" if is_base64(doc.page_content):\n",
|
||||
" plt_img_base64(doc.page_content)\n",
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -74,7 +74,7 @@
|
||||
" Applies the chatmodel to the message history\n",
|
||||
" and returns the message string\n",
|
||||
" \"\"\"\n",
|
||||
" message = self.model.invoke(\n",
|
||||
" message = self.model(\n",
|
||||
" [\n",
|
||||
" self.system_message,\n",
|
||||
" HumanMessage(content=\"\\n\".join(self.message_history + [self.prefix])),\n",
|
||||
|
||||
@@ -79,7 +79,7 @@
|
||||
" Applies the chatmodel to the message history\n",
|
||||
" and returns the message string\n",
|
||||
" \"\"\"\n",
|
||||
" message = self.model.invoke(\n",
|
||||
" message = self.model(\n",
|
||||
" [\n",
|
||||
" self.system_message,\n",
|
||||
" HumanMessage(content=\"\\n\".join(self.message_history + [self.prefix])),\n",
|
||||
@@ -234,7 +234,7 @@
|
||||
" termination_clause=self.termination_clause if self.stop else \"\",\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" self.response = self.model.invoke(\n",
|
||||
" self.response = self.model(\n",
|
||||
" [\n",
|
||||
" self.system_message,\n",
|
||||
" HumanMessage(content=response_prompt),\n",
|
||||
@@ -263,7 +263,7 @@
|
||||
" speaker_names=speaker_names,\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" choice_string = self.model.invoke(\n",
|
||||
" choice_string = self.model(\n",
|
||||
" [\n",
|
||||
" self.system_message,\n",
|
||||
" HumanMessage(content=choice_prompt),\n",
|
||||
@@ -299,7 +299,7 @@
|
||||
" ),\n",
|
||||
" next_speaker=self.next_speaker,\n",
|
||||
" )\n",
|
||||
" message = self.model.invoke(\n",
|
||||
" message = self.model(\n",
|
||||
" [\n",
|
||||
" self.system_message,\n",
|
||||
" HumanMessage(content=next_prompt),\n",
|
||||
|
||||
@@ -71,7 +71,7 @@
|
||||
" Applies the chatmodel to the message history\n",
|
||||
" and returns the message string\n",
|
||||
" \"\"\"\n",
|
||||
" message = self.model.invoke(\n",
|
||||
" message = self.model(\n",
|
||||
" [\n",
|
||||
" self.system_message,\n",
|
||||
" HumanMessage(content=\"\\n\".join(self.message_history + [self.prefix])),\n",
|
||||
@@ -164,7 +164,7 @@
|
||||
" message_history=\"\\n\".join(self.message_history),\n",
|
||||
" recent_message=self.message_history[-1],\n",
|
||||
" )\n",
|
||||
" bid_string = self.model.invoke([SystemMessage(content=prompt)]).content\n",
|
||||
" bid_string = self.model([SystemMessage(content=prompt)]).content\n",
|
||||
" return bid_string"
|
||||
]
|
||||
},
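The hunks above all make the same change: replacing direct calls to a chat model (`self.model(messages)`) with the `.invoke()` method. Below is a minimal sketch of the same migration outside those notebooks, assuming an OpenAI chat model; the prompt contents are placeholders only.

```python
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI

model = ChatOpenAI(temperature=0)

messages = [
    SystemMessage(content="You are a concise assistant."),  # placeholder prompt
    HumanMessage(content="Say hello."),
]

# Old style (deprecated): calling the chat model like a function
# message = model(messages)

# New style: use .invoke(), which returns an AIMessage
message = model.invoke(messages)
print(message.content)
```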
|
||||
|
||||
@@ -1,497 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "9fc3897d-176f-4729-8fd1-cfb4add53abd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Nomic multi-modal RAG\n",
|
||||
"\n",
|
||||
"Many documents contain a mixture of content types, including text and images. \n",
|
||||
"\n",
|
||||
"Yet, information captured in images is lost in most RAG applications.\n",
|
||||
"\n",
|
||||
"With the emergence of multimodal LLMs, like [GPT-4V](https://openai.com/research/gpt-4v-system-card), it is worth considering how to utilize images in RAG:\n",
|
||||
"\n",
|
||||
"In this demo we\n",
|
||||
"\n",
|
||||
"* Use multimodal embeddings from Nomic Embed [Vision](https://huggingface.co/nomic-ai/nomic-embed-vision-v1.5) and [Text](https://huggingface.co/nomic-ai/nomic-embed-text-v1.5) to embed images and text\n",
|
||||
"* Retrieve both using similarity search\n",
|
||||
"* Pass raw images and text chunks to a multimodal LLM for answer synthesis \n",
|
||||
"\n",
|
||||
"## Signup\n",
|
||||
"\n",
|
||||
"Get your API token, then run:\n",
|
||||
"```\n",
|
||||
"! nomic login\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Then run with your generated API token \n",
|
||||
"```\n",
|
||||
"! nomic login < token > \n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"## Packages\n",
|
||||
"\n",
|
||||
"For `unstructured`, you will also need `poppler` ([installation instructions](https://pdf2image.readthedocs.io/en/latest/installation.html)) and `tesseract` ([installation instructions](https://tesseract-ocr.github.io/tessdoc/Installation.html)) in your system."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "54926b9b-75c2-4cd4-8f14-b3882a0d370b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! nomic login token"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "febbc459-ebba-4c1a-a52b-fed7731593f8",
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install -U langchain-nomic langchain_community tiktoken langchain-openai chromadb langchain # (newest versions required for multi-modal)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "acbdc603-39e2-4a5f-836c-2bbaecd46b0b",
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# lock to 0.10.19 due to a persistent bug in more recent versions\n",
|
||||
"! pip install \"unstructured[all-docs]==0.10.19\" pillow pydantic lxml pillow matplotlib tiktoken"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1e94b3fb-8e3e-4736-be0a-ad881626c7bd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Data Loading\n",
|
||||
"\n",
|
||||
"### Partition PDF text and images\n",
|
||||
" \n",
|
||||
"Let's look at an example pdfs containing interesting images.\n",
|
||||
"\n",
|
||||
"1/ Art from the J Paul Getty museum:\n",
|
||||
"\n",
|
||||
" * Here is a [zip file](https://drive.google.com/file/d/18kRKbq2dqAhhJ3DfZRnYcTBEUfYxe1YR/view?usp=sharing) with the PDF and the already extracted images. \n",
|
||||
"* https://www.getty.edu/publications/resources/virtuallibrary/0892360224.pdf\n",
|
||||
"\n",
|
||||
"2/ Famous photographs from library of congress:\n",
|
||||
"\n",
|
||||
"* https://www.loc.gov/lcm/pdf/LCM_2020_1112.pdf\n",
|
||||
"* We'll use this as an example below\n",
|
||||
"\n",
|
||||
"We can use `partition_pdf` below from [Unstructured](https://unstructured-io.github.io/unstructured/introduction.html#key-concepts) to extract text and images.\n",
|
||||
"\n",
|
||||
"To supply this to extract the images:\n",
|
||||
"```\n",
|
||||
"extract_images_in_pdf=True\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"If using this zip file, then you can simply process the text only with:\n",
|
||||
"```\n",
|
||||
"extract_images_in_pdf=False\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9646b524-71a7-4b2a-bdc8-0b81f77e968f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Folder with pdf and extracted images\n",
|
||||
"from pathlib import Path\n",
|
||||
"\n",
|
||||
"# replace with actual path to images\n",
|
||||
"path = Path(\"../art\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "77f096ab-a933-41d0-8f4e-1efc83998fc3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"path.resolve()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "bc4839c0-8773-4a07-ba59-5364501269b2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Extract images, tables, and chunk text\n",
|
||||
"from unstructured.partition.pdf import partition_pdf\n",
|
||||
"\n",
|
||||
"raw_pdf_elements = partition_pdf(\n",
|
||||
" filename=str(path.resolve()) + \"/getty.pdf\",\n",
|
||||
" extract_images_in_pdf=False,\n",
|
||||
" infer_table_structure=True,\n",
|
||||
" chunking_strategy=\"by_title\",\n",
|
||||
" max_characters=4000,\n",
|
||||
" new_after_n_chars=3800,\n",
|
||||
" combine_text_under_n_chars=2000,\n",
|
||||
" image_output_dir_path=path,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "969545ad",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Categorize text elements by type\n",
|
||||
"tables = []\n",
|
||||
"texts = []\n",
|
||||
"for element in raw_pdf_elements:\n",
|
||||
" if \"unstructured.documents.elements.Table\" in str(type(element)):\n",
|
||||
" tables.append(str(element))\n",
|
||||
" elif \"unstructured.documents.elements.CompositeElement\" in str(type(element)):\n",
|
||||
" texts.append(str(element))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5d8e6349-1547-4cbf-9c6f-491d8610ec10",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Multi-modal embeddings with our document\n",
|
||||
"\n",
|
||||
"We will use [nomic-embed-vision-v1.5](https://huggingface.co/nomic-ai/nomic-embed-vision-v1.5) embeddings. This model is aligned \n",
|
||||
"to [nomic-embed-text-v1.5](https://huggingface.co/nomic-ai/nomic-embed-text-v1.5) allowing for multimodal semantic search and Multimodal RAG!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4bc15842-cb95-4f84-9eb5-656b0282a800",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import uuid\n",
|
||||
"\n",
|
||||
"import chromadb\n",
|
||||
"import numpy as np\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_nomic import NomicEmbeddings\n",
|
||||
"from PIL import Image as _PILImage\n",
|
||||
"\n",
|
||||
"# Create chroma\n",
|
||||
"text_vectorstore = Chroma(\n",
|
||||
" collection_name=\"mm_rag_clip_photos_text\",\n",
|
||||
" embedding_function=NomicEmbeddings(\n",
|
||||
" vision_model=\"nomic-embed-vision-v1.5\", model=\"nomic-embed-text-v1.5\"\n",
|
||||
" ),\n",
|
||||
")\n",
|
||||
"image_vectorstore = Chroma(\n",
|
||||
" collection_name=\"mm_rag_clip_photos_image\",\n",
|
||||
" embedding_function=NomicEmbeddings(\n",
|
||||
" vision_model=\"nomic-embed-vision-v1.5\", model=\"nomic-embed-text-v1.5\"\n",
|
||||
" ),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Get image URIs with .jpg extension only\n",
|
||||
"image_uris = sorted(\n",
|
||||
" [\n",
|
||||
" os.path.join(path, image_name)\n",
|
||||
" for image_name in os.listdir(path)\n",
|
||||
" if image_name.endswith(\".jpg\")\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Add images\n",
|
||||
"image_vectorstore.add_images(uris=image_uris)\n",
|
||||
"\n",
|
||||
"# Add documents\n",
|
||||
"text_vectorstore.add_texts(texts=texts)\n",
|
||||
"\n",
|
||||
"# Make retriever\n",
|
||||
"image_retriever = image_vectorstore.as_retriever()\n",
|
||||
"text_retriever = text_vectorstore.as_retriever()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "02a186d0-27e0-4820-8092-63b5349dd25d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## RAG\n",
|
||||
"\n",
|
||||
"`vectorstore.add_images` will store / retrieve images as base64 encoded strings.\n",
|
||||
"\n",
|
||||
"These can be passed to [GPT-4V](https://platform.openai.com/docs/guides/vision)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "344f56a8-0dc3-433e-851c-3f7600c7a72b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import base64\n",
|
||||
"import io\n",
|
||||
"from io import BytesIO\n",
|
||||
"\n",
|
||||
"import numpy as np\n",
|
||||
"from PIL import Image\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def resize_base64_image(base64_string, size=(128, 128)):\n",
|
||||
" \"\"\"\n",
|
||||
" Resize an image encoded as a Base64 string.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" base64_string (str): Base64 string of the original image.\n",
|
||||
" size (tuple): Desired size of the image as (width, height).\n",
|
||||
"\n",
|
||||
" Returns:\n",
|
||||
" str: Base64 string of the resized image.\n",
|
||||
" \"\"\"\n",
|
||||
" # Decode the Base64 string\n",
|
||||
" img_data = base64.b64decode(base64_string)\n",
|
||||
" img = Image.open(io.BytesIO(img_data))\n",
|
||||
"\n",
|
||||
" # Resize the image\n",
|
||||
" resized_img = img.resize(size, Image.LANCZOS)\n",
|
||||
"\n",
|
||||
" # Save the resized image to a bytes buffer\n",
|
||||
" buffered = io.BytesIO()\n",
|
||||
" resized_img.save(buffered, format=img.format)\n",
|
||||
"\n",
|
||||
" # Encode the resized image to Base64\n",
|
||||
" return base64.b64encode(buffered.getvalue()).decode(\"utf-8\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def is_base64(s):\n",
|
||||
" \"\"\"Check if a string is Base64 encoded\"\"\"\n",
|
||||
" try:\n",
|
||||
" return base64.b64encode(base64.b64decode(s)) == s.encode()\n",
|
||||
" except Exception:\n",
|
||||
" return False\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def split_image_text_types(docs):\n",
|
||||
" \"\"\"Split numpy array images and texts\"\"\"\n",
|
||||
" images = []\n",
|
||||
" text = []\n",
|
||||
" for doc in docs:\n",
|
||||
" doc = doc.page_content # Extract Document contents\n",
|
||||
" if is_base64(doc):\n",
|
||||
" # Resize image to avoid OAI server error\n",
|
||||
" images.append(\n",
|
||||
" resize_base64_image(doc, size=(250, 250))\n",
|
||||
" ) # base64 encoded str\n",
|
||||
" else:\n",
|
||||
" text.append(doc)\n",
|
||||
" return {\"images\": images, \"texts\": text}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "23a2c1d8-fea6-4152-b184-3172dd46c735",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Currently, we format the inputs using a `RunnableLambda` while we add image support to `ChatPromptTemplates`.\n",
|
||||
"\n",
|
||||
"Our runnable follows the classic RAG flow - \n",
|
||||
"\n",
|
||||
"* We first compute the context (both \"texts\" and \"images\" in this case) and the question (just a RunnablePassthrough here) \n",
|
||||
"* Then we pass this into our prompt template, which is a custom function that formats the message for the gpt-4-vision-preview model. \n",
|
||||
"* And finally we parse the output as a string."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "5d8919dc-c238-4746-86ba-45d940a7d260",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = \"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4c93fab3-74c4-4f1d-958a-0bc4cdd0797e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from operator import itemgetter\n",
|
||||
"\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def prompt_func(data_dict):\n",
|
||||
" # Joining the context texts into a single string\n",
|
||||
" formatted_texts = \"\\n\".join(data_dict[\"text_context\"][\"texts\"])\n",
|
||||
" messages = []\n",
|
||||
"\n",
|
||||
" # Adding image(s) to the messages if present\n",
|
||||
" if data_dict[\"image_context\"][\"images\"]:\n",
|
||||
" image_message = {\n",
|
||||
" \"type\": \"image_url\",\n",
|
||||
" \"image_url\": {\n",
|
||||
" \"url\": f\"data:image/jpeg;base64,{data_dict['image_context']['images'][0]}\"\n",
|
||||
" },\n",
|
||||
" }\n",
|
||||
" messages.append(image_message)\n",
|
||||
"\n",
|
||||
" # Adding the text message for analysis\n",
|
||||
" text_message = {\n",
|
||||
" \"type\": \"text\",\n",
|
||||
" \"text\": (\n",
|
||||
" \"As an expert art critic and historian, your task is to analyze and interpret images, \"\n",
|
||||
" \"considering their historical and cultural significance. Alongside the images, you will be \"\n",
|
||||
" \"provided with related text to offer context. Both will be retrieved from a vectorstore based \"\n",
|
||||
" \"on user-input keywords. Please use your extensive knowledge and analytical skills to provide a \"\n",
|
||||
" \"comprehensive summary that includes:\\n\"\n",
|
||||
" \"- A detailed description of the visual elements in the image.\\n\"\n",
|
||||
" \"- The historical and cultural context of the image.\\n\"\n",
|
||||
" \"- An interpretation of the image's symbolism and meaning.\\n\"\n",
|
||||
" \"- Connections between the image and the related text.\\n\\n\"\n",
|
||||
" f\"User-provided keywords: {data_dict['question']}\\n\\n\"\n",
|
||||
" \"Text and / or tables:\\n\"\n",
|
||||
" f\"{formatted_texts}\"\n",
|
||||
" ),\n",
|
||||
" }\n",
|
||||
" messages.append(text_message)\n",
|
||||
"\n",
|
||||
" return [HumanMessage(content=messages)]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI(temperature=0, model=\"gpt-4-vision-preview\", max_tokens=1024)\n",
|
||||
"\n",
|
||||
"# RAG pipeline\n",
|
||||
"chain = (\n",
|
||||
" {\n",
|
||||
" \"text_context\": text_retriever | RunnableLambda(split_image_text_types),\n",
|
||||
" \"image_context\": image_retriever | RunnableLambda(split_image_text_types),\n",
|
||||
" \"question\": RunnablePassthrough(),\n",
|
||||
" }\n",
|
||||
" | RunnableLambda(prompt_func)\n",
|
||||
" | model\n",
|
||||
" | StrOutputParser()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1566096d-97c2-4ddc-ba4a-6ef88c525e4e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Test retrieval and run RAG"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "90121e56-674b-473b-871d-6e4753fd0c45",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from IPython.display import HTML, display\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def plt_img_base64(img_base64):\n",
|
||||
" # Create an HTML img tag with the base64 string as the source\n",
|
||||
" image_html = f'<img src=\"data:image/jpeg;base64,{img_base64}\" />'\n",
|
||||
"\n",
|
||||
" # Display the image by rendering the HTML\n",
|
||||
" display(HTML(image_html))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"docs = text_retriever.invoke(\"Women with children\", k=5)\n",
|
||||
"for doc in docs:\n",
|
||||
" if is_base64(doc.page_content):\n",
|
||||
" plt_img_base64(doc.page_content)\n",
|
||||
" else:\n",
|
||||
" print(doc.page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "44eaa532-f035-4c04-b578-02339d42554c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docs = image_retriever.invoke(\"Women with children\", k=5)\n",
|
||||
"for doc in docs:\n",
|
||||
" if is_base64(doc.page_content):\n",
|
||||
" plt_img_base64(doc.page_content)\n",
|
||||
" else:\n",
|
||||
" print(doc.page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "69fb15fd-76fc-49b4-806d-c4db2990027d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain.invoke(\"Women with children\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "227f08b8-e732-4089-b65c-6eb6f9e48f15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can see the images retrieved in the LangSmith trace:\n",
|
||||
"\n",
|
||||
"LangSmith [trace](https://smith.langchain.com/public/69c558a5-49dc-4c60-a49b-3adbb70f74c5/r/e872c2c8-528c-468f-aefd-8b5cd730a673)."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,880 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Oracle AI Vector Search with Document Processing\n",
|
||||
"Oracle AI Vector Search is designed for Artificial Intelligence (AI) workloads that allows you to query data based on semantics, rather than keywords.\n",
|
||||
"One of the biggest benefits of Oracle AI Vector Search is that semantic search on unstructured data can be combined with relational search on business data in one single system.\n",
|
||||
"This is not only powerful but also significantly more effective because you don't need to add a specialized vector database, eliminating the pain of data fragmentation between multiple systems.\n",
|
||||
"\n",
|
||||
"In addition, your vectors can benefit from all of Oracle Database’s most powerful features, like the following:\n",
|
||||
"\n",
|
||||
" * [Partitioning Support](https://www.oracle.com/database/technologies/partitioning.html)\n",
|
||||
" * [Real Application Clusters scalability](https://www.oracle.com/database/real-application-clusters/)\n",
|
||||
" * [Exadata smart scans](https://www.oracle.com/database/technologies/exadata/software/smartscan/)\n",
|
||||
" * [Shard processing across geographically distributed databases](https://www.oracle.com/database/distributed-database/)\n",
|
||||
" * [Transactions](https://docs.oracle.com/en/database/oracle/oracle-database/23/cncpt/transactions.html)\n",
|
||||
" * [Parallel SQL](https://docs.oracle.com/en/database/oracle/oracle-database/21/vldbg/parallel-exec-intro.html#GUID-D28717E4-0F77-44F5-BB4E-234C31D4E4BA)\n",
|
||||
" * [Disaster recovery](https://www.oracle.com/database/data-guard/)\n",
|
||||
" * [Security](https://www.oracle.com/security/database-security/)\n",
|
||||
" * [Oracle Machine Learning](https://www.oracle.com/artificial-intelligence/database-machine-learning/)\n",
|
||||
" * [Oracle Graph Database](https://www.oracle.com/database/integrated-graph-database/)\n",
|
||||
" * [Oracle Spatial and Graph](https://www.oracle.com/database/spatial/)\n",
|
||||
" * [Oracle Blockchain](https://docs.oracle.com/en/database/oracle/oracle-database/23/arpls/dbms_blockchain_table.html#GUID-B469E277-978E-4378-A8C1-26D3FF96C9A6)\n",
|
||||
" * [JSON](https://docs.oracle.com/en/database/oracle/oracle-database/23/adjsn/json-in-oracle-database.html)\n",
|
||||
"\n",
|
||||
"This guide demonstrates how Oracle AI Vector Search can be used with Langchain to serve an end-to-end RAG pipeline. This guide goes through examples of:\n",
|
||||
"\n",
|
||||
" * Loading the documents from various sources using OracleDocLoader\n",
|
||||
" * Summarizing them within/outside the database using OracleSummary\n",
|
||||
" * Generating embeddings for them within/outside the database using OracleEmbeddings\n",
|
||||
" * Chunking them according to different requirements using Advanced Oracle Capabilities from OracleTextSplitter\n",
|
||||
" * Storing and Indexing them in a Vector Store and querying them for queries in OracleVS"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you are just starting with Oracle Database, consider exploring the [free Oracle 23 AI](https://www.oracle.com/database/free/#resources) which provides a great introduction to setting up your database environment. While working with the database, it is often advisable to avoid using the system user by default; instead, you can create your own user for enhanced security and customization. For detailed steps on user creation, refer to our [end-to-end guide](https://github.com/langchain-ai/langchain/blob/master/cookbook/oracleai_demo.ipynb) which also shows how to set up a user in Oracle. Additionally, understanding user privileges is crucial for managing database security effectively. You can learn more about this topic in the official [Oracle guide](https://docs.oracle.com/en/database/oracle/oracle-database/19/admqs/administering-user-accounts-and-security.html#GUID-36B21D72-1BBB-46C9-A0C9-F0D2A8591B8D) on administering user accounts and security."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Prerequisites\n",
|
||||
"\n",
|
||||
"Please install Oracle Python Client driver to use Langchain with Oracle AI Vector Search. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# pip install oracledb"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Create Demo User\n",
|
||||
"First, create a demo user with all the required privileges. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 37,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Connection successful!\n",
|
||||
"User setup done!\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import sys\n",
|
||||
"\n",
|
||||
"import oracledb\n",
|
||||
"\n",
|
||||
"# Update with your username, password, hostname, and service_name\n",
|
||||
"username = \"\"\n",
|
||||
"password = \"\"\n",
|
||||
"dsn = \"\"\n",
|
||||
"\n",
|
||||
"try:\n",
|
||||
" conn = oracledb.connect(user=username, password=password, dsn=dsn)\n",
|
||||
" print(\"Connection successful!\")\n",
|
||||
"\n",
|
||||
" cursor = conn.cursor()\n",
|
||||
" try:\n",
|
||||
" cursor.execute(\n",
|
||||
" \"\"\"\n",
|
||||
" begin\n",
|
||||
" -- Drop user\n",
|
||||
" begin\n",
|
||||
" execute immediate 'drop user testuser cascade';\n",
|
||||
" exception\n",
|
||||
" when others then\n",
|
||||
" dbms_output.put_line('Error dropping user: ' || SQLERRM);\n",
|
||||
" end;\n",
|
||||
" \n",
|
||||
" -- Create user and grant privileges\n",
|
||||
" execute immediate 'create user testuser identified by testuser';\n",
|
||||
" execute immediate 'grant connect, unlimited tablespace, create credential, create procedure, create any index to testuser';\n",
|
||||
" execute immediate 'create or replace directory DEMO_PY_DIR as ''/scratch/hroy/view_storage/hroy_devstorage/demo/orachain''';\n",
|
||||
" execute immediate 'grant read, write on directory DEMO_PY_DIR to public';\n",
|
||||
" execute immediate 'grant create mining model to testuser';\n",
|
||||
" \n",
|
||||
" -- Network access\n",
|
||||
" begin\n",
|
||||
" DBMS_NETWORK_ACL_ADMIN.APPEND_HOST_ACE(\n",
|
||||
" host => '*',\n",
|
||||
" ace => xs$ace_type(privilege_list => xs$name_list('connect'),\n",
|
||||
" principal_name => 'testuser',\n",
|
||||
" principal_type => xs_acl.ptype_db)\n",
|
||||
" );\n",
|
||||
" end;\n",
|
||||
" end;\n",
|
||||
" \"\"\"\n",
|
||||
" )\n",
|
||||
" print(\"User setup done!\")\n",
|
||||
" except Exception as e:\n",
|
||||
" print(f\"User setup failed with error: {e}\")\n",
|
||||
" finally:\n",
|
||||
" cursor.close()\n",
|
||||
" conn.close()\n",
|
||||
"except Exception as e:\n",
|
||||
" print(f\"Connection failed with error: {e}\")\n",
|
||||
" sys.exit(1)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Process Documents using Oracle AI\n",
|
||||
"Consider the following scenario: users possess documents stored either in an Oracle Database or a file system and intend to utilize this data with Oracle AI Vector Search powered by Langchain.\n",
|
||||
"\n",
|
||||
"To prepare the documents for analysis, a comprehensive preprocessing workflow is necessary. Initially, the documents must be retrieved, summarized (if required), and chunked as needed. Subsequent steps involve generating embeddings for these chunks and integrating them into the Oracle AI Vector Store. Users can then conduct semantic searches on this data.\n",
|
||||
"\n",
|
||||
"The Oracle AI Vector Search Langchain library encompasses a suite of document processing tools that facilitate document loading, chunking, summary generation, and embedding creation.\n",
|
||||
"\n",
|
||||
"In the sections that follow, we will detail the utilization of Oracle AI Langchain APIs to effectively implement each of these processes."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Connect to Demo User\n",
|
||||
"The following sample code will show how to connect to Oracle Database. By default, python-oracledb runs in a ‘Thin’ mode which connects directly to Oracle Database. This mode does not need Oracle Client libraries. However, some additional functionality is available when python-oracledb uses them. Python-oracledb is said to be in ‘Thick’ mode when Oracle Client libraries are used. Both modes have comprehensive functionality supporting the Python Database API v2.0 Specification. See the following [guide](https://python-oracledb.readthedocs.io/en/latest/user_guide/appendix_a.html#featuresummary) that talks about features supported in each mode. You might want to switch to thick-mode if you are unable to use thin-mode."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 45,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Connection successful!\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import sys\n",
|
||||
"\n",
|
||||
"import oracledb\n",
|
||||
"\n",
|
||||
"# please update with your username, password, hostname and service_name\n",
|
||||
"username = \"\"\n",
|
||||
"password = \"\"\n",
|
||||
"dsn = \"\"\n",
|
||||
"\n",
|
||||
"try:\n",
|
||||
" conn = oracledb.connect(user=username, password=password, dsn=dsn)\n",
|
||||
" print(\"Connection successful!\")\n",
|
||||
"except Exception as e:\n",
|
||||
" print(\"Connection failed!\")\n",
|
||||
" sys.exit(1)"
|
||||
]
|
||||
},
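The cell above connects in the default thin mode, reusing the `username`, `password`, and `dsn` variables defined there. If you need thick mode (for example, for features that require the Oracle Client libraries), python-oracledb can be switched over with `oracledb.init_oracle_client()` before connecting. A minimal sketch follows; the Instant Client path is only a placeholder for your own installation.

```python
import oracledb

# Enable thick mode by loading the Oracle Client libraries.
# lib_dir is optional if the libraries are already on the library search path;
# the path below is a placeholder.
oracledb.init_oracle_client(lib_dir="/opt/oracle/instantclient_23_4")

conn = oracledb.connect(user=username, password=password, dsn=dsn)
print(conn.thin)  # False once thick mode is active
```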
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Populate a Demo Table\n",
|
||||
"Create a demo table and insert some sample documents."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 46,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Table created and populated.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"try:\n",
|
||||
" cursor = conn.cursor()\n",
|
||||
"\n",
|
||||
" drop_table_sql = \"\"\"drop table demo_tab\"\"\"\n",
|
||||
" cursor.execute(drop_table_sql)\n",
|
||||
"\n",
|
||||
" create_table_sql = \"\"\"create table demo_tab (id number, data clob)\"\"\"\n",
|
||||
" cursor.execute(create_table_sql)\n",
|
||||
"\n",
|
||||
" insert_row_sql = \"\"\"insert into demo_tab values (:1, :2)\"\"\"\n",
|
||||
" rows_to_insert = [\n",
|
||||
" (\n",
|
||||
" 1,\n",
|
||||
" \"If the answer to any preceding questions is yes, then the database stops the search and allocates space from the specified tablespace; otherwise, space is allocated from the database default shared temporary tablespace.\",\n",
|
||||
" ),\n",
|
||||
" (\n",
|
||||
" 2,\n",
|
||||
" \"A tablespace can be online (accessible) or offline (not accessible) whenever the database is open.\\nA tablespace is usually online so that its data is available to users. The SYSTEM tablespace and temporary tablespaces cannot be taken offline.\",\n",
|
||||
" ),\n",
|
||||
" (\n",
|
||||
" 3,\n",
|
||||
" \"The database stores LOBs differently from other data types. Creating a LOB column implicitly creates a LOB segment and a LOB index. The tablespace containing the LOB segment and LOB index, which are always stored together, may be different from the tablespace containing the table.\\nSometimes the database can store small amounts of LOB data in the table itself rather than in a separate LOB segment.\",\n",
|
||||
" ),\n",
|
||||
" ]\n",
|
||||
" cursor.executemany(insert_row_sql, rows_to_insert)\n",
|
||||
"\n",
|
||||
" conn.commit()\n",
|
||||
"\n",
|
||||
" print(\"Table created and populated.\")\n",
|
||||
" cursor.close()\n",
|
||||
"except Exception as e:\n",
|
||||
" print(\"Table creation failed.\")\n",
|
||||
" cursor.close()\n",
|
||||
" conn.close()\n",
|
||||
" sys.exit(1)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"With the inclusion of a demo user and a populated sample table, the remaining configuration involves setting up embedding and summary functionalities. Users are presented with multiple provider options, including local database solutions and third-party services such as Ocigenai, Hugging Face, and OpenAI. Should users opt for a third-party provider, they are required to establish credentials containing the necessary authentication details. Conversely, if selecting a database as the provider for embeddings, it is necessary to upload an ONNX model to the Oracle Database. No additional setup is required for summary functionalities when using the database option."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Load ONNX Model\n",
|
||||
"\n",
|
||||
"Oracle accommodates a variety of embedding providers, enabling users to choose between proprietary database solutions and third-party services such as OCIGENAI and HuggingFace. This selection dictates the methodology for generating and managing embeddings.\n",
|
||||
"\n",
|
||||
"***Important*** : Should users opt for the database option, they must upload an ONNX model into the Oracle Database. Conversely, if a third-party provider is selected for embedding generation, uploading an ONNX model to Oracle Database is not required.\n",
|
||||
"\n",
|
||||
"A significant advantage of utilizing an ONNX model directly within Oracle is the enhanced security and performance it offers by eliminating the need to transmit data to external parties. Additionally, this method avoids the latency typically associated with network or REST API calls.\n",
|
||||
"\n",
|
||||
"Below is the example code to upload an ONNX model into Oracle Database:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 47,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"ONNX model loaded.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.embeddings.oracleai import OracleEmbeddings\n",
|
||||
"\n",
|
||||
"# please update with your related information\n",
|
||||
"# make sure that you have onnx file in the system\n",
|
||||
"onnx_dir = \"DEMO_PY_DIR\"\n",
|
||||
"onnx_file = \"tinybert.onnx\"\n",
|
||||
"model_name = \"demo_model\"\n",
|
||||
"\n",
|
||||
"try:\n",
|
||||
" OracleEmbeddings.load_onnx_model(conn, onnx_dir, onnx_file, model_name)\n",
|
||||
" print(\"ONNX model loaded.\")\n",
|
||||
"except Exception as e:\n",
|
||||
" print(\"ONNX model loading failed!\")\n",
|
||||
" sys.exit(1)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Create Credential\n",
|
||||
"\n",
|
||||
"When selecting third-party providers for generating embeddings, users are required to establish credentials to securely access the provider's endpoints.\n",
|
||||
"\n",
|
||||
"***Important:*** No credentials are necessary when opting for the 'database' provider to generate embeddings. However, should users decide to utilize a third-party provider, they must create credentials specific to the chosen provider.\n",
|
||||
"\n",
|
||||
"Below is an illustrative example:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"try:\n",
|
||||
" cursor = conn.cursor()\n",
|
||||
" cursor.execute(\n",
|
||||
" \"\"\"\n",
|
||||
" declare\n",
|
||||
" jo json_object_t;\n",
|
||||
" begin\n",
|
||||
" -- HuggingFace\n",
|
||||
" dbms_vector_chain.drop_credential(credential_name => 'HF_CRED');\n",
|
||||
" jo := json_object_t();\n",
|
||||
" jo.put('access_token', '<access_token>');\n",
|
||||
" dbms_vector_chain.create_credential(\n",
|
||||
" credential_name => 'HF_CRED',\n",
|
||||
" params => json(jo.to_string));\n",
|
||||
"\n",
|
||||
" -- OCIGENAI\n",
|
||||
" dbms_vector_chain.drop_credential(credential_name => 'OCI_CRED');\n",
|
||||
" jo := json_object_t();\n",
|
||||
" jo.put('user_ocid','<user_ocid>');\n",
|
||||
" jo.put('tenancy_ocid','<tenancy_ocid>');\n",
|
||||
" jo.put('compartment_ocid','<compartment_ocid>');\n",
|
||||
" jo.put('private_key','<private_key>');\n",
|
||||
" jo.put('fingerprint','<fingerprint>');\n",
|
||||
" dbms_vector_chain.create_credential(\n",
|
||||
" credential_name => 'OCI_CRED',\n",
|
||||
" params => json(jo.to_string));\n",
|
||||
" end;\n",
|
||||
" \"\"\"\n",
|
||||
" )\n",
|
||||
" cursor.close()\n",
|
||||
" print(\"Credentials created.\")\n",
|
||||
"except Exception as ex:\n",
|
||||
" cursor.close()\n",
|
||||
" raise"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Load Documents\n",
|
||||
"Users have the flexibility to load documents from either the Oracle Database, a file system, or both, by appropriately configuring the loader parameters. For comprehensive details on these parameters, please consult the [Oracle AI Vector Search Guide](https://docs.oracle.com/en/database/oracle/oracle-database/23/arpls/dbms_vector_chain1.html#GUID-73397E89-92FB-48ED-94BB-1AD960C4EA1F).\n",
|
||||
"\n",
|
||||
"A significant advantage of utilizing OracleDocLoader is its capability to process over 150 distinct file formats, eliminating the need for multiple loaders for different document types. For a complete list of the supported formats, please refer to the [Oracle Text Supported Document Formats](https://docs.oracle.com/en/database/oracle/oracle-database/23/ccref/oracle-text-supported-document-formats.html).\n",
|
||||
"\n",
|
||||
"Below is a sample code snippet that demonstrates how to use OracleDocLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 48,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Number of docs loaded: 3\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders.oracleai import OracleDocLoader\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"\n",
|
||||
"# loading from Oracle Database table\n",
|
||||
"# make sure you have the table with this specification\n",
|
||||
"loader_params = {}\n",
|
||||
"loader_params = {\n",
|
||||
" \"owner\": \"testuser\",\n",
|
||||
" \"tablename\": \"demo_tab\",\n",
|
||||
" \"colname\": \"data\",\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"\"\"\" load the docs \"\"\"\n",
|
||||
"loader = OracleDocLoader(conn=conn, params=loader_params)\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"\"\"\" verify \"\"\"\n",
|
||||
"print(f\"Number of docs loaded: {len(docs)}\")\n",
|
||||
"# print(f\"Document-0: {docs[0].page_content}\") # content"
|
||||
]
|
||||
},
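The sample above loads from a database table. The same loader can also read from the file system; the parameter keys used below ('file' for a single document, 'dir' for a folder) are an assumption drawn from the Oracle AI Vector Search loader documentation, so verify them against the guide linked above. The paths are placeholders. This is a hedged sketch, not part of the original notebook.

```python
from langchain_community.document_loaders.oracleai import OracleDocLoader

# Load a single file from the local file system (path is a placeholder).
file_loader_params = {"file": "/tmp/docs/report.pdf"}
file_docs = OracleDocLoader(conn=conn, params=file_loader_params).load()

# Load every supported document in a directory (path is a placeholder).
dir_loader_params = {"dir": "/tmp/docs"}
dir_docs = OracleDocLoader(conn=conn, params=dir_loader_params).load()

print(f"Docs from file: {len(file_docs)}, docs from directory: {len(dir_docs)}")
```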
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Generate Summary\n",
|
||||
"Now that the user loaded the documents, they may want to generate a summary for each document. The Oracle AI Vector Search Langchain library offers a suite of APIs designed for document summarization. It supports multiple summarization providers such as Database, OCIGENAI, HuggingFace, among others, allowing users to select the provider that best meets their needs. To utilize these capabilities, users must configure the summary parameters as specified. For detailed information on these parameters, please consult the [Oracle AI Vector Search Guide book](https://docs.oracle.com/en/database/oracle/oracle-database/23/arpls/dbms_vector_chain1.html#GUID-EC9DDB58-6A15-4B36-BA66-ECBA20D2CE57)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"***Note:*** The users may need to set proxy if they want to use some 3rd party summary generation providers other than Oracle's in-house and default provider: 'database'. If you don't have proxy, please remove the proxy parameter when you instantiate the OracleSummary."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# proxy to be used when we instantiate summary and embedder object\n",
|
||||
"proxy = \"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The following sample code will show how to generate summary:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 49,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Number of Summaries: 3\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.utilities.oracleai import OracleSummary\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"\n",
|
||||
"# using 'database' provider\n",
|
||||
"summary_params = {\n",
|
||||
" \"provider\": \"database\",\n",
|
||||
" \"glevel\": \"S\",\n",
|
||||
" \"numParagraphs\": 1,\n",
|
||||
" \"language\": \"english\",\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"# get the summary instance\n",
|
||||
"# Remove proxy if not required\n",
|
||||
"summ = OracleSummary(conn=conn, params=summary_params, proxy=proxy)\n",
|
||||
"\n",
|
||||
"list_summary = []\n",
|
||||
"for doc in docs:\n",
|
||||
" summary = summ.get_summary(doc.page_content)\n",
|
||||
" list_summary.append(summary)\n",
|
||||
"\n",
|
||||
"\"\"\" verify \"\"\"\n",
|
||||
"print(f\"Number of Summaries: {len(list_summary)}\")\n",
|
||||
"# print(f\"Summary-0: {list_summary[0]}\") #content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Split Documents\n",
|
||||
"The documents may vary in size, ranging from small to very large. Users often prefer to chunk their documents into smaller sections to facilitate the generation of embeddings. A wide array of customization options is available for this splitting process. For comprehensive details regarding these parameters, please consult the [Oracle AI Vector Search Guide](https://docs.oracle.com/en/database/oracle/oracle-database/23/arpls/dbms_vector_chain1.html#GUID-4E145629-7098-4C7C-804F-FC85D1F24240).\n",
|
||||
"\n",
|
||||
"Below is a sample code illustrating how to implement this:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 50,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Number of Chunks: 3\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders.oracleai import OracleTextSplitter\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"\n",
|
||||
"# split by default parameters\n",
|
||||
"splitter_params = {\"normalize\": \"all\"}\n",
|
||||
"\n",
|
||||
"\"\"\" get the splitter instance \"\"\"\n",
|
||||
"splitter = OracleTextSplitter(conn=conn, params=splitter_params)\n",
|
||||
"\n",
|
||||
"list_chunks = []\n",
|
||||
"for doc in docs:\n",
|
||||
" chunks = splitter.split_text(doc.page_content)\n",
|
||||
" list_chunks.extend(chunks)\n",
|
||||
"\n",
|
||||
"\"\"\" verify \"\"\"\n",
|
||||
"print(f\"Number of Chunks: {len(list_chunks)}\")\n",
|
||||
"# print(f\"Chunk-0: {list_chunks[0]}\") # content"
|
||||
]
|
||||
},
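The cell above splits with the default settings plus normalization. The splitter also accepts the chunking options documented for Oracle's DBMS_VECTOR_CHAIN.UTL_TO_CHUNKS; the exact keys below ('by', 'max', 'overlap', 'split') are an assumption drawn from that documentation, so check them against the guide linked above before relying on them. This is a hedged sketch, not part of the original notebook.

```python
from langchain_community.document_loaders.oracleai import OracleTextSplitter

# Split by words, capping each chunk at ~200 words with a 10% overlap and
# preferring sentence boundaries (parameter names assumed from the Oracle docs).
custom_splitter_params = {
    "by": "words",
    "max": 200,
    "overlap": 10,
    "split": "sentence",
    "normalize": "all",
}

custom_splitter = OracleTextSplitter(conn=conn, params=custom_splitter_params)
custom_chunks = custom_splitter.split_text(docs[0].page_content)
print(f"Number of chunks with custom parameters: {len(custom_chunks)}")
```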
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Generate Embeddings\n",
|
||||
"Now that the documents are chunked as per requirements, the users may want to generate embeddings for these chunks. Oracle AI Vector Search provides multiple methods for generating embeddings, utilizing either locally hosted ONNX models or third-party APIs. For comprehensive instructions on configuring these alternatives, please refer to the [Oracle AI Vector Search Guide](https://docs.oracle.com/en/database/oracle/oracle-database/23/arpls/dbms_vector_chain1.html#GUID-C6439E94-4E86-4ECD-954E-4B73D53579DE)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"***Note:*** Users may need to configure a proxy to utilize third-party embedding generation providers, excluding the 'database' provider that utilizes an ONNX model."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# proxy to be used when we instantiate summary and embedder object\n",
|
||||
"proxy = \"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The following sample code will show how to generate embeddings:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 51,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Number of embeddings: 3\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.embeddings.oracleai import OracleEmbeddings\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"\n",
|
||||
"# using ONNX model loaded to Oracle Database\n",
|
||||
"embedder_params = {\"provider\": \"database\", \"model\": \"demo_model\"}\n",
|
||||
"\n",
|
||||
"# get the embedding instance\n",
|
||||
"# Remove proxy if not required\n",
|
||||
"embedder = OracleEmbeddings(conn=conn, params=embedder_params, proxy=proxy)\n",
|
||||
"\n",
|
||||
"embeddings = []\n",
|
||||
"for doc in docs:\n",
|
||||
" chunks = splitter.split_text(doc.page_content)\n",
|
||||
" for chunk in chunks:\n",
|
||||
" embed = embedder.embed_query(chunk)\n",
|
||||
" embeddings.append(embed)\n",
|
||||
"\n",
|
||||
"\"\"\" verify \"\"\"\n",
|
||||
"print(f\"Number of embeddings: {len(embeddings)}\")\n",
|
||||
"# print(f\"Embedding-0: {embeddings[0]}\") # content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create Oracle AI Vector Store\n",
|
||||
"Now that you know how to use Oracle AI Langchain library APIs individually to process the documents, let us show how to integrate with Oracle AI Vector Store to facilitate the semantic searches."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"First, let's import all the dependencies."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 52,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import sys\n",
|
||||
"\n",
|
||||
"import oracledb\n",
|
||||
"from langchain_community.document_loaders.oracleai import (\n",
|
||||
" OracleDocLoader,\n",
|
||||
" OracleTextSplitter,\n",
|
||||
")\n",
|
||||
"from langchain_community.embeddings.oracleai import OracleEmbeddings\n",
|
||||
"from langchain_community.utilities.oracleai import OracleSummary\n",
|
||||
"from langchain_community.vectorstores import oraclevs\n",
|
||||
"from langchain_community.vectorstores.oraclevs import OracleVS\n",
|
||||
"from langchain_community.vectorstores.utils import DistanceStrategy\n",
|
||||
"from langchain_core.documents import Document"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Next, let's combine all document processing stages together. Here is the sample code below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 53,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Connection successful!\n",
|
||||
"ONNX model loaded.\n",
|
||||
"Number of total chunks with metadata: 3\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"\"\"\"\n",
|
||||
"In this sample example, we will use 'database' provider for both summary and embeddings.\n",
|
||||
"So, we don't need to do the followings:\n",
|
||||
" - set proxy for 3rd party providers\n",
|
||||
" - create credential for 3rd party providers\n",
|
||||
"\n",
|
||||
"If you choose to use 3rd party provider, \n",
|
||||
"please follow the necessary steps for proxy and credential.\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"# oracle connection\n",
|
||||
"# please update with your username, password, hostname, and service_name\n",
|
||||
"username = \"\"\n",
|
||||
"password = \"\"\n",
|
||||
"dsn = \"\"\n",
|
||||
"\n",
|
||||
"try:\n",
|
||||
" conn = oracledb.connect(user=username, password=password, dsn=dsn)\n",
|
||||
" print(\"Connection successful!\")\n",
|
||||
"except Exception as e:\n",
|
||||
" print(\"Connection failed!\")\n",
|
||||
" sys.exit(1)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# load onnx model\n",
|
||||
"# please update with your related information\n",
|
||||
"onnx_dir = \"DEMO_PY_DIR\"\n",
|
||||
"onnx_file = \"tinybert.onnx\"\n",
|
||||
"model_name = \"demo_model\"\n",
|
||||
"try:\n",
|
||||
" OracleEmbeddings.load_onnx_model(conn, onnx_dir, onnx_file, model_name)\n",
|
||||
" print(\"ONNX model loaded.\")\n",
|
||||
"except Exception as e:\n",
|
||||
" print(\"ONNX model loading failed!\")\n",
|
||||
" sys.exit(1)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# params\n",
|
||||
"# please update necessary fields with related information\n",
|
||||
"loader_params = {\n",
|
||||
" \"owner\": \"testuser\",\n",
|
||||
" \"tablename\": \"demo_tab\",\n",
|
||||
" \"colname\": \"data\",\n",
|
||||
"}\n",
|
||||
"summary_params = {\n",
|
||||
" \"provider\": \"database\",\n",
|
||||
" \"glevel\": \"S\",\n",
|
||||
" \"numParagraphs\": 1,\n",
|
||||
" \"language\": \"english\",\n",
|
||||
"}\n",
|
||||
"splitter_params = {\"normalize\": \"all\"}\n",
|
||||
"embedder_params = {\"provider\": \"database\", \"model\": \"demo_model\"}\n",
|
||||
"\n",
|
||||
"# instantiate loader, summary, splitter, and embedder\n",
|
||||
"loader = OracleDocLoader(conn=conn, params=loader_params)\n",
|
||||
"summary = OracleSummary(conn=conn, params=summary_params)\n",
|
||||
"splitter = OracleTextSplitter(conn=conn, params=splitter_params)\n",
|
||||
"embedder = OracleEmbeddings(conn=conn, params=embedder_params)\n",
|
||||
"\n",
|
||||
"# process the documents\n",
|
||||
"chunks_with_mdata = []\n",
|
||||
"for id, doc in enumerate(docs, start=1):\n",
|
||||
" summ = summary.get_summary(doc.page_content)\n",
|
||||
" chunks = splitter.split_text(doc.page_content)\n",
|
||||
" for ic, chunk in enumerate(chunks, start=1):\n",
|
||||
" chunk_metadata = doc.metadata.copy()\n",
|
||||
" chunk_metadata[\"id\"] = chunk_metadata[\"_oid\"] + \"$\" + str(id) + \"$\" + str(ic)\n",
|
||||
" chunk_metadata[\"document_id\"] = str(id)\n",
|
||||
" chunk_metadata[\"document_summary\"] = str(summ[0])\n",
|
||||
" chunks_with_mdata.append(\n",
|
||||
" Document(page_content=str(chunk), metadata=chunk_metadata)\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"\"\"\" verify \"\"\"\n",
|
||||
"print(f\"Number of total chunks with metadata: {len(chunks_with_mdata)}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"At this point, we have processed the documents and generated chunks with metadata. Next, we will create Oracle AI Vector Store with those chunks.\n",
|
||||
"\n",
|
||||
"Here is the sample code how to do that:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 55,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Vector Store Table: oravs\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# create Oracle AI Vector Store\n",
|
||||
"vectorstore = OracleVS.from_documents(\n",
|
||||
" chunks_with_mdata,\n",
|
||||
" embedder,\n",
|
||||
" client=conn,\n",
|
||||
" table_name=\"oravs\",\n",
|
||||
" distance_strategy=DistanceStrategy.DOT_PRODUCT,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\"\"\" verify \"\"\"\n",
|
||||
"print(f\"Vector Store Table: {vectorstore.table_name}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The example provided illustrates the creation of a vector store using the DOT_PRODUCT distance strategy. Users have the flexibility to employ various distance strategies with the Oracle AI Vector Store, as detailed in our [comprehensive guide](https://python.langchain.com/v0.1/docs/integrations/vectorstores/oracle/)."
|
||||
]
|
||||
},
|
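As a minimal sketch of that flexibility, the same construction can be repeated with a different strategy, for instance cosine similarity; the table name below is an assumption for illustration:

# sketch: same chunks and embedder, but cosine distance instead of dot product
vectorstore_cosine = OracleVS.from_documents(
    chunks_with_mdata,
    embedder,
    client=conn,
    table_name="oravs_cosine",  # hypothetical table name
    distance_strategy=DistanceStrategy.COSINE,
)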
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"With embeddings now stored in vector stores, it is advisable to establish an index to enhance semantic search performance during query execution.\n",
|
||||
"\n",
|
||||
"***Note*** Should you encounter an \"insufficient memory\" error, it is recommended to increase the ***vector_memory_size*** in your database configuration\n",
|
||||
"\n",
|
||||
"Below is a sample code snippet for creating an index:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 56,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"oraclevs.create_index(\n",
|
||||
" conn, vectorstore, params={\"idx_name\": \"hnsw_oravs\", \"idx_type\": \"HNSW\"}\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(\"Index created.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This example demonstrates the creation of a default HNSW index on embeddings within the 'oravs' table. Users may adjust various parameters according to their specific needs. For detailed information on these parameters, please consult the [Oracle AI Vector Search Guide book](https://docs.oracle.com/en/database/oracle/oracle-database/23/vecse/manage-different-categories-vector-indexes.html).\n",
|
||||
"\n",
|
||||
"Additionally, various types of vector indices can be created to meet diverse requirements. More details can be found in our [comprehensive guide](https://python.langchain.com/v0.1/docs/integrations/vectorstores/oracle/).\n"
|
||||
]
|
||||
},
|
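As a hedged sketch of such tuning, an alternative index type can be requested through the params dictionary; the index type and parameter values below follow the patterns described in the linked guides and should be treated as illustrative assumptions, not recommendations:

# sketch: an IVF index with illustrative tuning parameters
oraclevs.create_index(
    conn,
    vectorstore,
    params={"idx_name": "ivf_oravs", "idx_type": "IVF", "accuracy": 90, "parallel": 8},
)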
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Perform Semantic Search\n",
|
||||
"All set!\n",
|
||||
"\n",
|
||||
"We have successfully processed the documents and stored them in the vector store, followed by the creation of an index to enhance query performance. We are now prepared to proceed with semantic searches.\n",
|
||||
"\n",
|
||||
"Below is the sample code for this process:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 58,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[Document(page_content='The database stores LOBs differently from other data types. Creating a LOB column implicitly creates a LOB segment and a LOB index. The tablespace containing the LOB segment and LOB index, which are always stored together, may be different from the tablespace containing the table. Sometimes the database can store small amounts of LOB data in the table itself rather than in a separate LOB segment.', metadata={'_oid': '662f2f257677f3c2311a8ff999fd34e5', '_rowid': 'AAAR/xAAEAAAAAnAAC', 'id': '662f2f257677f3c2311a8ff999fd34e5$3$1', 'document_id': '3', 'document_summary': 'Sometimes the database can store small amounts of LOB data in the table itself rather than in a separate LOB segment.\\n\\n'})]\n",
|
||||
"[]\n",
|
||||
"[(Document(page_content='The database stores LOBs differently from other data types. Creating a LOB column implicitly creates a LOB segment and a LOB index. The tablespace containing the LOB segment and LOB index, which are always stored together, may be different from the tablespace containing the table. Sometimes the database can store small amounts of LOB data in the table itself rather than in a separate LOB segment.', metadata={'_oid': '662f2f257677f3c2311a8ff999fd34e5', '_rowid': 'AAAR/xAAEAAAAAnAAC', 'id': '662f2f257677f3c2311a8ff999fd34e5$3$1', 'document_id': '3', 'document_summary': 'Sometimes the database can store small amounts of LOB data in the table itself rather than in a separate LOB segment.\\n\\n'}), 0.055675752460956573)]\n",
|
||||
"[]\n",
|
||||
"[Document(page_content='If the answer to any preceding questions is yes, then the database stops the search and allocates space from the specified tablespace; otherwise, space is allocated from the database default shared temporary tablespace.', metadata={'_oid': '662f2f253acf96b33b430b88699490a2', '_rowid': 'AAAR/xAAEAAAAAnAAA', 'id': '662f2f253acf96b33b430b88699490a2$1$1', 'document_id': '1', 'document_summary': 'If the answer to any preceding questions is yes, then the database stops the search and allocates space from the specified tablespace; otherwise, space is allocated from the database default shared temporary tablespace.\\n\\n'})]\n",
|
||||
"[Document(page_content='If the answer to any preceding questions is yes, then the database stops the search and allocates space from the specified tablespace; otherwise, space is allocated from the database default shared temporary tablespace.', metadata={'_oid': '662f2f253acf96b33b430b88699490a2', '_rowid': 'AAAR/xAAEAAAAAnAAA', 'id': '662f2f253acf96b33b430b88699490a2$1$1', 'document_id': '1', 'document_summary': 'If the answer to any preceding questions is yes, then the database stops the search and allocates space from the specified tablespace; otherwise, space is allocated from the database default shared temporary tablespace.\\n\\n'})]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = \"What is Oracle AI Vector Store?\"\n",
|
||||
"filter = {\"document_id\": [\"1\"]}\n",
|
||||
"\n",
|
||||
"# Similarity search without a filter\n",
|
||||
"print(vectorstore.similarity_search(query, 1))\n",
|
||||
"\n",
|
||||
"# Similarity search with a filter\n",
|
||||
"print(vectorstore.similarity_search(query, 1, filter=filter))\n",
|
||||
"\n",
|
||||
"# Similarity search with relevance score\n",
|
||||
"print(vectorstore.similarity_search_with_score(query, 1))\n",
|
||||
"\n",
|
||||
"# Similarity search with relevance score with filter\n",
|
||||
"print(vectorstore.similarity_search_with_score(query, 1, filter=filter))\n",
|
||||
"\n",
|
||||
"# Max marginal relevance search\n",
|
||||
"print(vectorstore.max_marginal_relevance_search(query, 1, fetch_k=20, lambda_mult=0.5))\n",
|
||||
"\n",
|
||||
"# Max marginal relevance search with filter\n",
|
||||
"print(\n",
|
||||
" vectorstore.max_marginal_relevance_search(\n",
|
||||
" query, 1, fetch_k=20, lambda_mult=0.5, filter=filter\n",
|
||||
" )\n",
|
||||
")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -129,7 +129,7 @@
|
||||
" return obs_message\n",
|
||||
"\n",
|
||||
" def _act(self):\n",
|
||||
" act_message = self.model.invoke(self.message_history)\n",
|
||||
" act_message = self.model(self.message_history)\n",
|
||||
" self.message_history.append(act_message)\n",
|
||||
" action = int(self.action_parser.parse(act_message.content)[\"action\"])\n",
|
||||
" return action\n",
|
||||
|
||||
@@ -84,7 +84,7 @@
|
||||
"from langchain.retrievers import KayAiRetriever\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI(model=\"gpt-3.5-turbo\")\n",
|
||||
"model = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n",
|
||||
"retriever = KayAiRetriever.create(\n",
|
||||
" dataset_id=\"company\", data_types=[\"PressRelease\"], num_contexts=6\n",
|
||||
")\n",
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -1,82 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# RAG using Upstage Layout Analysis and Groundedness Check\n",
|
||||
"This example illustrates RAG using [Upstage](https://python.langchain.com/docs/integrations/providers/upstage/) Layout Analysis and Groundedness Check."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"from langchain_community.vectorstores import DocArrayInMemorySearch\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"from langchain_core.runnables.base import RunnableSerializable\n",
|
||||
"from langchain_upstage import (\n",
|
||||
" ChatUpstage,\n",
|
||||
" UpstageEmbeddings,\n",
|
||||
" UpstageGroundednessCheck,\n",
|
||||
" UpstageLayoutAnalysisLoader,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"model = ChatUpstage()\n",
|
||||
"\n",
|
||||
"files = [\"/PATH/TO/YOUR/FILE.pdf\", \"/PATH/TO/YOUR/FILE2.pdf\"]\n",
|
||||
"\n",
|
||||
"loader = UpstageLayoutAnalysisLoader(file_path=files, split=\"element\")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"vectorstore = DocArrayInMemorySearch.from_documents(\n",
|
||||
" docs, embedding=UpstageEmbeddings(model=\"solar-embedding-1-large\")\n",
|
||||
")\n",
|
||||
"retriever = vectorstore.as_retriever()\n",
|
||||
"\n",
|
||||
"template = \"\"\"Answer the question based only on the following context:\n",
|
||||
"{context}\n",
|
||||
"\n",
|
||||
"Question: {question}\n",
|
||||
"\"\"\"\n",
|
||||
"prompt = ChatPromptTemplate.from_template(template)\n",
|
||||
"output_parser = StrOutputParser()\n",
|
||||
"\n",
|
||||
"retrieved_docs = retriever.get_relevant_documents(\"How many parameters in SOLAR model?\")\n",
|
||||
"\n",
|
||||
"groundedness_check = UpstageGroundednessCheck()\n",
|
||||
"groundedness = \"\"\n",
|
||||
"while groundedness != \"grounded\":\n",
|
||||
" chain: RunnableSerializable = RunnablePassthrough() | prompt | model | output_parser\n",
|
||||
"\n",
|
||||
" result = chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"context\": retrieved_docs,\n",
|
||||
" \"question\": \"How many parameters in SOLAR model?\",\n",
|
||||
" }\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" groundedness = groundedness_check.invoke(\n",
|
||||
" {\n",
|
||||
" \"context\": retrieved_docs,\n",
|
||||
" \"answer\": result,\n",
|
||||
" }\n",
|
||||
" )"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -39,10 +39,12 @@
|
||||
"from langchain_community.document_loaders.recursive_url_loader import (\n",
|
||||
" RecursiveUrlLoader,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# noqa\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"\n",
|
||||
"# For our example, we'll load docs from the web\n",
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter # noqa\n",
|
||||
"\n",
|
||||
"DOCSTORE_DIR = \".\"\n",
|
||||
"DOCSTORE_ID_KEY = \"doc_id\""
|
||||
|
||||
@@ -274,7 +274,7 @@
|
||||
"db = SQLDatabase.from_uri(\n",
|
||||
" CONNECTION_STRING\n",
|
||||
") # We reconnect to db so the new columns are loaded as well.\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-4\", temperature=0)\n",
|
||||
"llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)\n",
|
||||
"\n",
|
||||
"sql_query_chain = (\n",
|
||||
" RunnablePassthrough.assign(schema=get_schema)\n",
|
||||
|
||||
@@ -245,7 +245,7 @@
|
||||
"\n",
|
||||
"\n",
|
||||
"def _parse(text):\n",
|
||||
" return text.strip('\"').strip(\"**\")"
|
||||
" return text.strip(\"**\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -1,32 +1,28 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# SalesGPT - Context-Aware AI Sales Assistant With Knowledge Base and Ability Generate Stripe Payment Links\n",
|
||||
"# SalesGPT - Your Context-Aware AI Sales Assistant With Knowledge Base\n",
|
||||
"\n",
|
||||
"This notebook demonstrates an implementation of a **Context-Aware** AI Sales agent with a Product Knowledge Base which can actually close sales. \n",
|
||||
"This notebook demonstrates an implementation of a **Context-Aware** AI Sales agent with a Product Knowledge Base. \n",
|
||||
"\n",
|
||||
"This notebook was originally published at [filipmichalsky/SalesGPT](https://github.com/filip-michalsky/SalesGPT) by [@FilipMichalsky](https://twitter.com/FilipMichalsky).\n",
|
||||
"\n",
|
||||
"SalesGPT is context-aware, which means it can understand what section of a sales conversation it is in and act accordingly.\n",
|
||||
" \n",
|
||||
"As such, this agent can have a natural sales conversation with a prospect and behaves based on the conversation stage. Hence, this notebook demonstrates how we can use AI to automate sales development representatives activites, such as outbound sales calls. \n",
|
||||
"As such, this agent can have a natural sales conversation with a prospect and behaves based on the conversation stage. Hence, this notebook demonstrates how we can use AI to automate sales development representatives activities, such as outbound sales calls. \n",
|
||||
"\n",
|
||||
"Additionally, the AI Sales agent has access to tools, which allow it to interact with other systems.\n",
|
||||
"\n",
|
||||
"Here, we show how the AI Sales Agent can use a **Product Knowledge Base** to speak about a particular's company offerings,\n",
|
||||
"hence increasing relevance and reducing hallucinations.\n",
|
||||
"\n",
|
||||
"Furthermore, we show how our AI Sales Agent can **generate sales** by integration with the AI Agent Highway called [Mindware](https://www.mindware.co/). In practice, this allows the agent to autonomously generate a payment link for your customers **to pay for your products via Stripe**.\n",
|
||||
"\n",
|
||||
"We leverage the [`langchain`](https://github.com/hwchase17/langchain) library in this implementation, specifically [Custom Agent Configuration](https://langchain-langchain.vercel.app/docs/modules/agents/how_to/custom_agent_with_tool_retrieval) and are inspired by [BabyAGI](https://github.com/yoheinakajima/babyagi) architecture ."
|
||||
"We leverage the [`langchain`](https://github.com/langchain-ai/langchain) library in this implementation, specifically [Custom Agent Configuration](https://langchain-langchain.vercel.app/docs/modules/agents/how_to/custom_agent_with_tool_retrieval) and are inspired by [BabyAGI](https://github.com/yoheinakajima/babyagi) architecture ."
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -42,10 +38,9 @@
|
||||
"import os\n",
|
||||
"import re\n",
|
||||
"\n",
|
||||
"# make sure you have .env file saved locally with your API keys\n",
|
||||
"from dotenv import load_dotenv\n",
|
||||
"\n",
|
||||
"load_dotenv()\n",
|
||||
"# import your OpenAI key\n",
|
||||
"OPENAI_API_KEY = \"sk-xx\"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = OPENAI_API_KEY\n",
|
||||
"\n",
|
||||
"from typing import Any, Callable, Dict, List, Union\n",
|
||||
"\n",
|
||||
@@ -54,18 +49,27 @@
|
||||
"from langchain.agents.conversational.prompt import FORMAT_INSTRUCTIONS\n",
|
||||
"from langchain.chains import LLMChain, RetrievalQA\n",
|
||||
"from langchain.chains.base import Chain\n",
|
||||
"from langchain.llms import BaseLLM\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain.prompts.base import StringPromptTemplate\n",
|
||||
"from langchain.schema import AgentAction, AgentFinish\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain.vectorstores import Chroma\n",
|
||||
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
|
||||
"from langchain_community.llms import BaseLLM\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_core.agents import AgentAction, AgentFinish\n",
|
||||
"from langchain_openai import ChatOpenAI, OpenAI, OpenAIEmbeddings\n",
|
||||
"from langchain_text_splitters import CharacterTextSplitter\n",
|
||||
"from pydantic import BaseModel, Field"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# install additional dependencies\n",
|
||||
"# ! pip install chromadb openai tiktoken"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -73,21 +77,19 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"1. Seed the SalesGPT agent\n",
|
||||
"2. Run Sales Agent to decide what to do:\n",
|
||||
"\n",
|
||||
" a) Use a tool, such as look up Product Information in a Knowledge Base or Generate a Payment Link\n",
|
||||
" a) Use a tool, such as look up Product Information in a Knowledge Base\n",
|
||||
" \n",
|
||||
" b) Output a response to a user \n",
|
||||
"3. Run Sales Stage Recognition Agent to recognize which stage is the sales agent at and adjust their behaviour accordingly."
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -96,17 +98,15 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Architecture diagram\n",
|
||||
"\n",
|
||||
"<img src=\"https://demo-bucket-45.s3.amazonaws.com/new_flow2.png\" width=\"800\" height=\"440\">\n"
|
||||
"<img src=\"https://singularity-assets-public.s3.amazonaws.com/new_flow.png\" width=\"800\" height=\"440\"/>\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -131,7 +131,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -149,7 +149,7 @@
|
||||
" {conversation_history}\n",
|
||||
" ===\n",
|
||||
"\n",
|
||||
" Now determine what should be the next immediate conversation stage for the agent in the sales conversation by selecting ony from the following options:\n",
|
||||
" Now determine what should be the next immediate conversation stage for the agent in the sales conversation by selecting only from the following options:\n",
|
||||
" 1. Introduction: Start the conversation by introducing yourself and your company. Be polite and respectful while keeping the tone of the conversation professional.\n",
|
||||
" 2. Qualification: Qualify the prospect by confirming if they are the right person to talk to regarding your product/service. Ensure that they have the authority to make purchasing decisions.\n",
|
||||
" 3. Value proposition: Briefly explain how your product/service can benefit the prospect. Focus on the unique selling points and value proposition of your product/service that sets it apart from competitors.\n",
|
||||
@@ -171,7 +171,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -223,7 +223,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -240,17 +240,13 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# test the intermediate chains\n",
|
||||
"verbose = True\n",
|
||||
"llm = ChatOpenAI(\n",
|
||||
" model=\"gpt-4-turbo-preview\",\n",
|
||||
" temperature=0.9,\n",
|
||||
" openai_api_key=os.getenv(\"OPENAI_API_KEY\"),\n",
|
||||
")\n",
|
||||
"llm = ChatOpenAI(temperature=0.9)\n",
|
||||
"\n",
|
||||
"stage_analyzer_chain = StageAnalyzerChain.from_llm(llm, verbose=verbose)\n",
|
||||
"\n",
|
||||
@@ -261,7 +257,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -280,7 +276,7 @@
|
||||
" \n",
|
||||
" ===\n",
|
||||
"\n",
|
||||
" Now determine what should be the next immediate conversation stage for the agent in the sales conversation by selecting ony from the following options:\n",
|
||||
" Now determine what should be the next immediate conversation stage for the agent in the sales conversation by selecting only from the following options:\n",
|
||||
" 1. Introduction: Start the conversation by introducing yourself and your company. Be polite and respectful while keeping the tone of the conversation professional.\n",
|
||||
" 2. Qualification: Qualify the prospect by confirming if they are the right person to talk to regarding your product/service. Ensure that they have the authority to make purchasing decisions.\n",
|
||||
" 3. Value proposition: Briefly explain how your product/service can benefit the prospect. Focus on the unique selling points and value proposition of your product/service that sets it apart from competitors.\n",
|
||||
@@ -300,21 +296,21 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'conversation_history': '', 'text': '1'}"
|
||||
"'1'"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"stage_analyzer_chain.invoke({\"conversation_history\": \"\"})"
|
||||
"stage_analyzer_chain.run(conversation_history=\"\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -356,44 +352,32 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'salesperson_name': 'Ted Lasso',\n",
|
||||
" 'salesperson_role': 'Business Development Representative',\n",
|
||||
" 'company_name': 'Sleep Haven',\n",
|
||||
" 'company_business': 'Sleep Haven is a premium mattress company that provides customers with the most comfortable and supportive sleeping experience possible. We offer a range of high-quality mattresses, pillows, and bedding accessories that are designed to meet the unique needs of our customers.',\n",
|
||||
" 'company_values': \"Our mission at Sleep Haven is to help people achieve a better night's sleep by providing them with the best possible sleep solutions. We believe that quality sleep is essential to overall health and well-being, and we are committed to helping our customers achieve optimal sleep by offering exceptional products and customer service.\",\n",
|
||||
" 'conversation_purpose': 'find out whether they are looking to achieve better sleep via buying a premier mattress.',\n",
|
||||
" 'conversation_history': 'Hello, this is Ted Lasso from Sleep Haven. How are you doing today? <END_OF_TURN>\\nUser: I am well, howe are you?<END_OF_TURN>',\n",
|
||||
" 'conversation_type': 'call',\n",
|
||||
" 'conversation_stage': 'Introduction: Start the conversation by introducing yourself and your company. Be polite and respectful while keeping the tone of the conversation professional. Your greeting should be welcoming. Always clarify in your greeting the reason why you are contacting the prospect.',\n",
|
||||
" 'text': \"I'm doing well, thank you for asking. The reason I'm calling is to discuss how Sleep Haven can help enhance your sleep quality with our premium mattresses. Are you currently looking for ways to achieve a better night's sleep? <END_OF_TURN>\"}"
|
||||
"\"I'm doing great, thank you for asking! As a Business Development Representative at Sleep Haven, I wanted to reach out to see if you are looking to achieve a better night's sleep. We provide premium mattresses that offer the most comfortable and supportive sleeping experience possible. Are you interested in exploring our sleep solutions? <END_OF_TURN>\""
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"sales_conversation_utterance_chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"salesperson_name\": \"Ted Lasso\",\n",
|
||||
" \"salesperson_role\": \"Business Development Representative\",\n",
|
||||
" \"company_name\": \"Sleep Haven\",\n",
|
||||
" \"company_business\": \"Sleep Haven is a premium mattress company that provides customers with the most comfortable and supportive sleeping experience possible. We offer a range of high-quality mattresses, pillows, and bedding accessories that are designed to meet the unique needs of our customers.\",\n",
|
||||
" \"company_values\": \"Our mission at Sleep Haven is to help people achieve a better night's sleep by providing them with the best possible sleep solutions. We believe that quality sleep is essential to overall health and well-being, and we are committed to helping our customers achieve optimal sleep by offering exceptional products and customer service.\",\n",
|
||||
" \"conversation_purpose\": \"find out whether they are looking to achieve better sleep via buying a premier mattress.\",\n",
|
||||
" \"conversation_history\": \"Hello, this is Ted Lasso from Sleep Haven. How are you doing today? <END_OF_TURN>\\nUser: I am well, howe are you?<END_OF_TURN>\",\n",
|
||||
" \"conversation_type\": \"call\",\n",
|
||||
" \"conversation_stage\": conversation_stages.get(\n",
|
||||
" \"1\",\n",
|
||||
" \"Introduction: Start the conversation by introducing yourself and your company. Be polite and respectful while keeping the tone of the conversation professional.\",\n",
|
||||
" ),\n",
|
||||
" }\n",
|
||||
"sales_conversation_utterance_chain.run(\n",
|
||||
" salesperson_name=\"Ted Lasso\",\n",
|
||||
" salesperson_role=\"Business Development Representative\",\n",
|
||||
" company_name=\"Sleep Haven\",\n",
|
||||
" company_business=\"Sleep Haven is a premium mattress company that provides customers with the most comfortable and supportive sleeping experience possible. We offer a range of high-quality mattresses, pillows, and bedding accessories that are designed to meet the unique needs of our customers.\",\n",
|
||||
" company_values=\"Our mission at Sleep Haven is to help people achieve a better night's sleep by providing them with the best possible sleep solutions. We believe that quality sleep is essential to overall health and well-being, and we are committed to helping our customers achieve optimal sleep by offering exceptional products and customer service.\",\n",
|
||||
" conversation_purpose=\"find out whether they are looking to achieve better sleep via buying a premier mattress.\",\n",
|
||||
" conversation_history=\"Hello, this is Ted Lasso from Sleep Haven. How are you doing today? <END_OF_TURN>\\nUser: I am well, howe are you?<END_OF_TURN>\",\n",
|
||||
" conversation_type=\"call\",\n",
|
||||
" conversation_stage=conversation_stages.get(\n",
|
||||
" \"1\",\n",
|
||||
" \"Introduction: Start the conversation by introducing yourself and your company. Be polite and respectful while keeping the tone of the conversation professional.\",\n",
|
||||
" ),\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -401,7 +385,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -412,7 +395,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -446,7 +429,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -462,7 +445,7 @@
|
||||
" text_splitter = CharacterTextSplitter(chunk_size=10, chunk_overlap=0)\n",
|
||||
" texts = text_splitter.split_text(product_catalog)\n",
|
||||
"\n",
|
||||
" llm = ChatOpenAI(temperature=0)\n",
|
||||
" llm = OpenAI(temperature=0)\n",
|
||||
" embeddings = OpenAIEmbeddings()\n",
|
||||
" docsearch = Chroma.from_texts(\n",
|
||||
" texts, embeddings, collection_name=\"product-knowledge-base\"\n",
|
||||
@@ -471,12 +454,29 @@
|
||||
" knowledge_base = RetrievalQA.from_chain_type(\n",
|
||||
" llm=llm, chain_type=\"stuff\", retriever=docsearch.as_retriever()\n",
|
||||
" )\n",
|
||||
" return knowledge_base"
|
||||
" return knowledge_base\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def get_tools(product_catalog):\n",
|
||||
" # query to get_tools can be used to be embedded and relevant tools found\n",
|
||||
" # see here: https://langchain-langchain.vercel.app/docs/use_cases/agents/custom_agent_with_plugin_retrieval#tool-retriever\n",
|
||||
"\n",
|
||||
" # we only use one tool for now, but this is highly extensible!\n",
|
||||
" knowledge_base = setup_knowledge_base(product_catalog)\n",
|
||||
" tools = [\n",
|
||||
" Tool(\n",
|
||||
" name=\"ProductSearch\",\n",
|
||||
" func=knowledge_base.run,\n",
|
||||
" description=\"useful for when you need to answer questions about product information\",\n",
|
||||
" )\n",
|
||||
" ]\n",
|
||||
"\n",
|
||||
" return tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -485,18 +485,16 @@
|
||||
"text": [
|
||||
"Created a chunk of size 940, which is longer than the specified 10\n",
|
||||
"Created a chunk of size 844, which is longer than the specified 10\n",
|
||||
"Created a chunk of size 837, which is longer than the specified 10\n",
|
||||
"/Users/filipmichalsky/Odyssey/sales_bot/SalesGPT/env/lib/python3.10/site-packages/langchain_core/_api/deprecation.py:117: LangChainDeprecationWarning: The function `run` was deprecated in LangChain 0.1.0 and will be removed in 0.2.0. Use invoke instead.\n",
|
||||
" warn_deprecated(\n"
|
||||
"Created a chunk of size 837, which is longer than the specified 10\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'The Sleep Haven products available are:\\n\\n1. Luxury Cloud-Comfort Memory Foam Mattress\\n2. Classic Harmony Spring Mattress\\n3. EcoGreen Hybrid Latex Mattress\\n4. Plush Serenity Bamboo Mattress\\n\\nEach product has its unique features and price point.'"
|
||||
"' We have four products available: the Classic Harmony Spring Mattress, the Plush Serenity Bamboo Mattress, the Luxury Cloud-Comfort Memory Foam Mattress, and the EcoGreen Hybrid Latex Mattress. Each product is available in different sizes, with the Classic Harmony Spring Mattress available in Queen and King sizes, the Plush Serenity Bamboo Mattress available in King size, the Luxury Cloud-Comfort Memory Foam Mattress available in Twin, Queen, and King sizes, and the EcoGreen Hybrid Latex Mattress available in Twin and Full sizes.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -510,199 +508,12 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Payment gateway"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"In order to set up your AI agent to use a payment gateway to generate payment links for your users you need two things:\n",
|
||||
"\n",
|
||||
"1. Sign up for a Stripe account and obtain a STRIPE API KEY\n",
|
||||
"2. Create products you would like to sell in the Stripe UI. Then follow out example of `example_product_price_id_mapping.json`\n",
|
||||
"to feed the product name to price_id mapping which allows you to generate the payment links."
|
||||
"### Set up the SalesGPT Controller with the Sales Agent and Stage Analyzer and a Knowledge Base"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"\n",
|
||||
"from litellm import completion\n",
|
||||
"\n",
|
||||
"# set GPT model env variable\n",
|
||||
"os.environ[\"GPT_MODEL\"] = \"gpt-4-turbo-preview\"\n",
|
||||
"\n",
|
||||
"product_price_id_mapping = {\n",
|
||||
" \"ai-consulting-services\": \"price_1Ow8ofB795AYY8p1goWGZi6m\",\n",
|
||||
" \"Luxury Cloud-Comfort Memory Foam Mattress\": \"price_1Owv99B795AYY8p1mjtbKyxP\",\n",
|
||||
" \"Classic Harmony Spring Mattress\": \"price_1Owv9qB795AYY8p1tPcxCM6T\",\n",
|
||||
" \"EcoGreen Hybrid Latex Mattress\": \"price_1OwvLDB795AYY8p1YBAMBcbi\",\n",
|
||||
" \"Plush Serenity Bamboo Mattress\": \"price_1OwvMQB795AYY8p1hJN2uS3S\",\n",
|
||||
"}\n",
|
||||
"with open(\"example_product_price_id_mapping.json\", \"w\") as f:\n",
|
||||
" json.dump(product_price_id_mapping, f)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def get_product_id_from_query(query, product_price_id_mapping_path):\n",
|
||||
" # Load product_price_id_mapping from a JSON file\n",
|
||||
" with open(product_price_id_mapping_path, \"r\") as f:\n",
|
||||
" product_price_id_mapping = json.load(f)\n",
|
||||
"\n",
|
||||
" # Serialize the product_price_id_mapping to a JSON string for inclusion in the prompt\n",
|
||||
" product_price_id_mapping_json_str = json.dumps(product_price_id_mapping)\n",
|
||||
"\n",
|
||||
" # Dynamically create the enum list from product_price_id_mapping keys\n",
|
||||
" enum_list = list(product_price_id_mapping.values()) + [\n",
|
||||
" \"No relevant product id found\"\n",
|
||||
" ]\n",
|
||||
" enum_list_str = json.dumps(enum_list)\n",
|
||||
"\n",
|
||||
" prompt = f\"\"\"\n",
|
||||
" You are an expert data scientist and you are working on a project to recommend products to customers based on their needs.\n",
|
||||
" Given the following query:\n",
|
||||
" {query}\n",
|
||||
" and the following product price id mapping:\n",
|
||||
" {product_price_id_mapping_json_str}\n",
|
||||
" return the price id that is most relevant to the query.\n",
|
||||
" ONLY return the price id, no other text. If no relevant price id is found, return 'No relevant price id found'.\n",
|
||||
" Your output will follow this schema:\n",
|
||||
" {{\n",
|
||||
" \"$schema\": \"http://json-schema.org/draft-07/schema#\",\n",
|
||||
" \"title\": \"Price ID Response\",\n",
|
||||
" \"type\": \"object\",\n",
|
||||
" \"properties\": {{\n",
|
||||
" \"price_id\": {{\n",
|
||||
" \"type\": \"string\",\n",
|
||||
" \"enum\": {enum_list_str}\n",
|
||||
" }}\n",
|
||||
" }},\n",
|
||||
" \"required\": [\"price_id\"]\n",
|
||||
" }}\n",
|
||||
" Return a valid directly parsable json, dont return in it within a code snippet or add any kind of explanation!!\n",
|
||||
" \"\"\"\n",
|
||||
" prompt += \"{\"\n",
|
||||
" response = completion(\n",
|
||||
" model=os.getenv(\"GPT_MODEL\", \"gpt-3.5-turbo-1106\"),\n",
|
||||
" messages=[{\"content\": prompt, \"role\": \"user\"}],\n",
|
||||
" max_tokens=1000,\n",
|
||||
" temperature=0,\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" product_id = response.choices[0].message.content.strip()\n",
|
||||
" return product_id"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"\n",
|
||||
"import requests\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def generate_stripe_payment_link(query: str) -> str:\n",
|
||||
" \"\"\"Generate a stripe payment link for a customer based on a single query string.\"\"\"\n",
|
||||
"\n",
|
||||
" # example testing payment gateway url\n",
|
||||
" PAYMENT_GATEWAY_URL = os.getenv(\n",
|
||||
" \"PAYMENT_GATEWAY_URL\", \"https://agent-payments-gateway.vercel.app/payment\"\n",
|
||||
" )\n",
|
||||
" PRODUCT_PRICE_MAPPING = \"example_product_price_id_mapping.json\"\n",
|
||||
"\n",
|
||||
" # use LLM to get the price_id from query\n",
|
||||
" price_id = get_product_id_from_query(query, PRODUCT_PRICE_MAPPING)\n",
|
||||
" price_id = json.loads(price_id)\n",
|
||||
" payload = json.dumps(\n",
|
||||
" {\"prompt\": query, **price_id, \"stripe_key\": os.getenv(\"STRIPE_API_KEY\")}\n",
|
||||
" )\n",
|
||||
" headers = {\n",
|
||||
" \"Content-Type\": \"application/json\",\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" response = requests.request(\n",
|
||||
" \"POST\", PAYMENT_GATEWAY_URL, headers=headers, data=payload\n",
|
||||
" )\n",
|
||||
" return response.text"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'{\"response\":\"https://buy.stripe.com/test_6oEbLS8JB1F9bv229d\"}'"
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"generate_stripe_payment_link(\n",
|
||||
" query=\"Please generate a payment link for John Doe to buy two mattresses - the Classic Harmony Spring Mattress\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup agent tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def get_tools(product_catalog):\n",
|
||||
" # query to get_tools can be used to be embedded and relevant tools found\n",
|
||||
" # see here: https://langchain-langchain.vercel.app/docs/use_cases/agents/custom_agent_with_plugin_retrieval#tool-retriever\n",
|
||||
"\n",
|
||||
" # we only use one tool for now, but this is highly extensible!\n",
|
||||
" knowledge_base = setup_knowledge_base(product_catalog)\n",
|
||||
" tools = [\n",
|
||||
" Tool(\n",
|
||||
" name=\"ProductSearch\",\n",
|
||||
" func=knowledge_base.run,\n",
|
||||
" description=\"useful for when you need to answer questions about product information or services offered, availability and their costs.\",\n",
|
||||
" ),\n",
|
||||
" Tool(\n",
|
||||
" name=\"GeneratePaymentLink\",\n",
|
||||
" func=generate_stripe_payment_link,\n",
|
||||
" description=\"useful to close a transaction with a customer. You need to include product name and quantity and customer name in the query input.\",\n",
|
||||
" ),\n",
|
||||
" ]\n",
|
||||
"\n",
|
||||
" return tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Set up the SalesGPT Controller with the Sales Agent and Stage Analyzer\n",
|
||||
"\n",
|
||||
"#### The Agent has access to a Knowledge Base and can autonomously sell your products via Stripe"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -752,11 +563,19 @@
|
||||
" print(\"TEXT\")\n",
|
||||
" print(text)\n",
|
||||
" print(\"-------\")\n",
|
||||
" if f\"{self.ai_prefix}:\" in text:\n",
|
||||
" return AgentFinish(\n",
|
||||
" {\"output\": text.split(f\"{self.ai_prefix}:\")[-1].strip()}, text\n",
|
||||
" )\n",
|
||||
" regex = r\"Action: (.*?)[\\n]*Action Input: (.*)\"\n",
|
||||
" match = re.search(regex, text)\n",
|
||||
" if not match:\n",
|
||||
" ## TODO - this is not entirely reliable, sometimes results in an error.\n",
|
||||
" return AgentFinish(\n",
|
||||
" {\"output\": text.split(f\"{self.ai_prefix}:\")[-1].strip()}, text\n",
|
||||
" {\n",
|
||||
" \"output\": \"I apologize, I was unable to find the answer to your question. Is there anything else I can help with?\"\n",
|
||||
" },\n",
|
||||
" text,\n",
|
||||
" )\n",
|
||||
" # raise OutputParserException(f\"Could not parse LLM output: `{text}`\")\n",
|
||||
" action = match.group(1)\n",
|
||||
@@ -770,7 +589,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -828,18 +647,18 @@
|
||||
"Previous conversation history:\n",
|
||||
"{conversation_history}\n",
|
||||
"\n",
|
||||
"Thought:\n",
|
||||
"{salesperson_name}:\n",
|
||||
"{agent_scratchpad}\n",
|
||||
"\"\"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class SalesGPT(Chain):\n",
|
||||
"class SalesGPT(Chain, BaseModel):\n",
|
||||
" \"\"\"Controller model for the Sales Agent.\"\"\"\n",
|
||||
"\n",
|
||||
" conversation_history: List[str] = []\n",
|
||||
@@ -985,9 +804,7 @@
|
||||
"\n",
|
||||
" # WARNING: this output parser is NOT reliable yet\n",
|
||||
" ## It makes assumptions about output from LLM which can break and throw an error\n",
|
||||
" output_parser = SalesConvoOutputParser(\n",
|
||||
" ai_prefix=kwargs[\"salesperson_name\"], verbose=verbose\n",
|
||||
" )\n",
|
||||
" output_parser = SalesConvoOutputParser(ai_prefix=kwargs[\"salesperson_name\"])\n",
|
||||
"\n",
|
||||
" sales_agent_with_tools = LLMSingleActionAgent(\n",
|
||||
" llm_chain=llm_chain,\n",
|
||||
@@ -1011,7 +828,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -1019,7 +835,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -1028,7 +843,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -1065,7 +880,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -1074,7 +888,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -1083,9 +897,7 @@
|
||||
"text": [
|
||||
"Created a chunk of size 940, which is longer than the specified 10\n",
|
||||
"Created a chunk of size 844, which is longer than the specified 10\n",
|
||||
"Created a chunk of size 837, which is longer than the specified 10\n",
|
||||
"/Users/filipmichalsky/Odyssey/sales_bot/SalesGPT/env/lib/python3.10/site-packages/langchain_core/_api/deprecation.py:117: LangChainDeprecationWarning: The class `langchain.agents.agent.LLMSingleActionAgent` was deprecated in langchain 0.1.0 and will be removed in 0.2.0. Use Use new agent constructor methods like create_react_agent, create_json_agent, create_structured_chat_agent, etc. instead.\n",
|
||||
" warn_deprecated(\n"
|
||||
"Created a chunk of size 837, which is longer than the specified 10\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -1095,7 +907,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -1105,7 +917,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -1122,14 +934,14 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"execution_count": 19,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Ted Lasso: Good day! This is Ted Lasso from Sleep Haven. How are you doing today?\n"
|
||||
"Ted Lasso: Hello, this is Ted Lasso from Sleep Haven. How are you doing today?\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -1139,18 +951,18 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"execution_count": 20,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"sales_agent.human_step(\n",
|
||||
" \"I am well, how are you? I would like to learn more about your services.\"\n",
|
||||
" \"I am well, how are you? I would like to learn more about your mattresses.\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 25,
|
||||
"execution_count": 21,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -1167,14 +979,14 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"execution_count": 22,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Ted Lasso: I'm doing great, thank you for asking! I'm glad to hear you're interested. Sleep Haven is a premium mattress company, and we're all about offering the best sleep solutions, including top-notch mattresses, pillows, and bedding accessories. Our mission is to help you achieve a better night's sleep. May I know if you're looking to enhance your sleep experience with a new mattress or bedding accessories? \n"
|
||||
"Ted Lasso: I'm glad to hear that you're doing well! As for our mattresses, at Sleep Haven, we provide customers with the most comfortable and supportive sleeping experience possible. Our high-quality mattresses are designed to meet the unique needs of our customers. Can I ask what specifically you'd like to learn more about? \n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -1184,18 +996,16 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"execution_count": 23,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"sales_agent.human_step(\n",
|
||||
" \"Yes, I would like to improve my sleep. Can you tell me more about your products?\"\n",
|
||||
")"
|
||||
"sales_agent.human_step(\"Yes, what materials are you mattresses made from?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 28,
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -1212,14 +1022,14 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 29,
|
||||
"execution_count": 25,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Ted Lasso: Absolutely, I'd be happy to share more about our products. At Sleep Haven, we offer a variety of high-quality mattresses designed to cater to different sleeping preferences and needs. Whether you're looking for memory foam's comfort, the support of hybrid mattresses, or the breathability of natural latex, we have options for everyone. Our pillows and bedding accessories are similarly curated to enhance your sleep quality. Every product is built with the aim of helping you achieve the restful night's sleep you deserve. What specific features are you looking for in a mattress? \n"
|
||||
"Ted Lasso: Our mattresses are made from a variety of materials, depending on the model. We have the EcoGreen Hybrid Latex Mattress, which is made from 100% natural latex harvested from eco-friendly plantations. The Plush Serenity Bamboo Mattress features a layer of plush, adaptive foam and a base of high-resilience support foam, with a bamboo-infused top layer. The Luxury Cloud-Comfort Memory Foam Mattress has an innovative, temperature-sensitive memory foam layer and a high-density foam base with cooling gel-infused particles. Finally, the Classic Harmony Spring Mattress has a robust inner spring construction and layers of plush padding, with a quilted top layer and a natural cotton cover. Is there anything specific you'd like to know about these materials?\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -1229,16 +1039,61 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 31,
|
||||
"execution_count": 26,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"sales_agent.human_step(\"What mattresses do you have and how much do they cost?\")"
|
||||
"sales_agent.human_step(\n",
|
||||
" \"Yes, I am looking for a queen sized mattress. Do you have any mattresses in queen size?\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 32,
|
||||
"execution_count": 27,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Conversation Stage: Needs analysis: Ask open-ended questions to uncover the prospect's needs and pain points. Listen carefully to their responses and take notes.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"sales_agent.determine_conversation_stage()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 28,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Ted Lasso: Yes, we do have queen-sized mattresses available. We offer the Luxury Cloud-Comfort Memory Foam Mattress and the Classic Harmony Spring Mattress in queen size. Both mattresses provide exceptional comfort and support. Is there anything specific you would like to know about these options?\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"sales_agent.step()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 29,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"sales_agent.human_step(\"Yea, compare and contrast those two options, please.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 30,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -1255,14 +1110,14 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 33,
|
||||
"execution_count": 31,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Ted Lasso: We offer two primary types of mattresses at Sleep Haven. The first is our Luxury Cloud-Comfort Memory Foam Mattress, which is priced at $999 and comes in Twin, Queen, and King sizes. The second is our Classic Harmony Spring Mattress, priced at $1,299, available in Queen and King sizes. Both are designed to provide exceptional comfort and support for a better night's sleep. Which type of mattress would you be interested in learning more about? \n"
|
||||
"Ted Lasso: The Luxury Cloud-Comfort Memory Foam Mattress is priced at $999 and is available in Twin, Queen, and King sizes. It features an innovative, temperature-sensitive memory foam layer and a high-density foam base. On the other hand, the Classic Harmony Spring Mattress is priced at $1,299 and is available in Queen and King sizes. It features a robust inner spring construction and layers of plush padding. Both mattresses provide exceptional comfort and support, but the Classic Harmony Spring Mattress may be a better option if you prefer the traditional feel of an inner spring mattress. Do you have any other questions about these options?\n"
]
}
],
@@ -1272,66 +1127,14 @@
},
{
"cell_type": "code",
"execution_count": 34,
"execution_count": 32,
"metadata": {},
"outputs": [],
"source": [
"sales_agent.human_step(\n",
" \"Okay.I would like to order two Memory Foam mattresses in Twin size please.\"\n",
" \"Great, thanks, that's it. I will talk to my wife and call back if she is onboard. Have a good day!\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 35,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Conversation Stage: Close: Ask for the sale by proposing a next step. This could be a demo, a trial or a meeting with decision-makers. Ensure to summarize what has been discussed and reiterate the benefits.\n"
]
}
],
"source": [
"sales_agent.determine_conversation_stage()"
]
},
{
"cell_type": "code",
"execution_count": 36,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Ted Lasso: Fantastic choice! You're on your way to a better night's sleep with our Luxury Cloud-Comfort Memory Foam Mattresses. I've generated a payment link for two Twin size mattresses for you. Here is the link to complete your purchase: https://buy.stripe.com/test_6oEg28e3V97BdDabJn. Is there anything else I can assist you with today? \n"
]
}
],
"source": [
"sales_agent.step()"
]
},
{
"cell_type": "code",
"execution_count": 37,
"metadata": {},
"outputs": [],
"source": [
"sales_agent.human_step(\n",
" \"Great, thanks! I will discuss with my wife and will buy it if she is onboard. Have a good day!\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
@@ -1350,9 +1153,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
"version": "3.11.3"
}
},
"nbformat": 4,
"nbformat_minor": 4
"nbformat_minor": 2
}
@@ -355,15 +355,15 @@
"metadata": {},
"outputs": [],
"source": [
"attribute_info[-2][\"description\"] += (\n",
" f\". Valid values are {sorted(latest_price['starrating'].value_counts().index.tolist())}\"\n",
")\n",
"attribute_info[3][\"description\"] += (\n",
" f\". Valid values are {sorted(latest_price['maxoccupancy'].value_counts().index.tolist())}\"\n",
")\n",
"attribute_info[-3][\"description\"] += (\n",
" f\". Valid values are {sorted(latest_price['country'].value_counts().index.tolist())}\"\n",
")"
"attribute_info[-2][\n",
"    \"description\"\n",
"] += f\". Valid values are {sorted(latest_price['starrating'].value_counts().index.tolist())}\"\n",
"attribute_info[3][\n",
"    \"description\"\n",
"] += f\". Valid values are {sorted(latest_price['maxoccupancy'].value_counts().index.tolist())}\"\n",
"attribute_info[-3][\n",
"    \"description\"\n",
"] += f\". Valid values are {sorted(latest_price['country'].value_counts().index.tolist())}\""
]
},
{
@@ -688,9 +688,9 @@
"metadata": {},
"outputs": [],
"source": [
"attribute_info[-3][\"description\"] += (\n",
" \". NOTE: Only use the 'eq' operator if a specific country is mentioned. If a region is mentioned, include all relevant countries in filter.\"\n",
")\n",
"attribute_info[-3][\n",
"    \"description\"\n",
"] += \". NOTE: Only use the 'eq' operator if a specific country is mentioned. If a region is mentioned, include all relevant countries in filter.\"\n",
"chain = load_query_constructor_runnable(\n",
"    ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0),\n",
"    doc_contents,\n",
@@ -1227,7 +1227,7 @@
}
],
"source": [
"results = retriever.invoke(\n",
"results = retriever.get_relevant_documents(\n",
" \"I want to stay somewhere highly rated along the coast. I want a room with a patio and a fireplace.\"\n",
")\n",
"for res in results:\n",
@@ -22,8 +22,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import hub\n",
"from langchain.agents import AgentExecutor, Tool, ZeroShotAgent, create_react_agent\n",
"from langchain.agents import AgentExecutor, Tool, ZeroShotAgent\n",
"from langchain.chains import LLMChain\n",
"from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory\n",
"from langchain.prompts import PromptTemplate\n",
@@ -85,7 +84,19 @@
"metadata": {},
"outputs": [],
"source": [
"prompt = hub.pull(\"hwchase17/react\")"
"prefix = \"\"\"Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:\"\"\"\n",
"suffix = \"\"\"Begin!\"\n",
"\n",
"{chat_history}\n",
"Question: {input}\n",
"{agent_scratchpad}\"\"\"\n",
"\n",
"prompt = ZeroShotAgent.create_prompt(\n",
"    tools,\n",
"    prefix=prefix,\n",
"    suffix=suffix,\n",
"    input_variables=[\"input\", \"chat_history\", \"agent_scratchpad\"],\n",
")"
]
},
{
@@ -103,14 +114,16 @@
"metadata": {},
"outputs": [],
"source": [
"model = OpenAI()\n",
"agent = create_react_agent(model, tools, prompt)\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, memory=memory)"
"llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)\n",
"agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)\n",
"agent_chain = AgentExecutor.from_agent_and_tools(\n",
"    agent=agent, tools=tools, verbose=True, memory=memory\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 36,
"execution_count": 6,
"id": "ca4bc1fb",
"metadata": {},
"outputs": [
@@ -120,15 +133,15 @@
"text": [
"\n",
"\n",
"\u001B[1m> Entering new AgentExecutor chain...\u001B[0m\n",
"\u001B[32;1m\u001B[1;3mThought: I should research ChatGPT to answer this question.\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: I should research ChatGPT to answer this question.\n",
"Action: Search\n",
"Action Input: \"ChatGPT\"\u001B[0m\n",
"Observation: \u001B[36;1m\u001B[1;3mNov 30, 2022 ... We've trained a model called ChatGPT which interacts in a conversational way. The dialogue format makes it possible for ChatGPT to answer ... ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large ... ChatGPT. We've trained a model called ChatGPT which interacts in a conversational way. The dialogue format makes it possible for ChatGPT to answer ... Feb 2, 2023 ... ChatGPT, the popular chatbot from OpenAI, is estimated to have reached 100 million monthly active users in January, just two months after ... 2 days ago ... ChatGPT recently launched a new version of its own plagiarism detection tool, with hopes that it will squelch some of the criticism around how ... An API for accessing new AI models developed by OpenAI. Feb 19, 2023 ... ChatGPT is an AI chatbot system that OpenAI released in November to show off and test what a very large, powerful AI system can accomplish. You ... ChatGPT is fine-tuned from GPT-3.5, a language model trained to produce text. ChatGPT was optimized for dialogue by using Reinforcement Learning with Human ... 3 days ago ... Visual ChatGPT connects ChatGPT and a series of Visual Foundation Models to enable sending and receiving images during chatting. Dec 1, 2022 ... ChatGPT is a natural language processing tool driven by AI technology that allows you to have human-like conversations and much more with a ...\u001B[0m\n",
"Thought:\u001B[32;1m\u001B[1;3m I now know the final answer.\n",
"Final Answer: ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large language models and is optimized for dialogue by using Reinforcement Learning with Human-in-the-Loop. It is also capable of sending and receiving images during chatting.\u001B[0m\n",
"Action Input: \"ChatGPT\"\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3mNov 30, 2022 ... We've trained a model called ChatGPT which interacts in a conversational way. The dialogue format makes it possible for ChatGPT to answer ... ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large ... ChatGPT. We've trained a model called ChatGPT which interacts in a conversational way. The dialogue format makes it possible for ChatGPT to answer ... Feb 2, 2023 ... ChatGPT, the popular chatbot from OpenAI, is estimated to have reached 100 million monthly active users in January, just two months after ... 2 days ago ... ChatGPT recently launched a new version of its own plagiarism detection tool, with hopes that it will squelch some of the criticism around how ... An API for accessing new AI models developed by OpenAI. Feb 19, 2023 ... ChatGPT is an AI chatbot system that OpenAI released in November to show off and test what a very large, powerful AI system can accomplish. You ... ChatGPT is fine-tuned from GPT-3.5, a language model trained to produce text. ChatGPT was optimized for dialogue by using Reinforcement Learning with Human ... 3 days ago ... Visual ChatGPT connects ChatGPT and a series of Visual Foundation Models to enable sending and receiving images during chatting. Dec 1, 2022 ... ChatGPT is a natural language processing tool driven by AI technology that allows you to have human-like conversations and much more with a ...\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer.\n",
"Final Answer: ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large language models and is optimized for dialogue by using Reinforcement Learning with Human-in-the-Loop. It is also capable of sending and receiving images during chatting.\u001b[0m\n",
"\n",
"\u001B[1m> Finished chain.\u001B[0m\n"
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
@@ -140,40 +153,10 @@
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
},
{
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001B[0;31m---------------------------------------------------------------------------\u001B[0m",
"\u001B[0;31mKeyboardInterrupt\u001B[0m Traceback (most recent call last)",
"Cell \u001B[0;32mIn[36], line 1\u001B[0m\n\u001B[0;32m----> 1\u001B[0m \u001B[43magent_executor\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43minvoke\u001B[49m\u001B[43m(\u001B[49m\u001B[43m{\u001B[49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[38;5;124;43minput\u001B[39;49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[43m:\u001B[49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[38;5;124;43mWhat is ChatGPT?\u001B[39;49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[43m}\u001B[49m\u001B[43m)\u001B[49m\n",
"File \u001B[0;32m~/code/langchain/libs/langchain/langchain/chains/base.py:163\u001B[0m, in \u001B[0;36mChain.invoke\u001B[0;34m(self, input, config, **kwargs)\u001B[0m\n\u001B[1;32m 161\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m \u001B[38;5;167;01mBaseException\u001B[39;00m \u001B[38;5;28;01mas\u001B[39;00m e:\n\u001B[1;32m 162\u001B[0m run_manager\u001B[38;5;241m.\u001B[39mon_chain_error(e)\n\u001B[0;32m--> 163\u001B[0m \u001B[38;5;28;01mraise\u001B[39;00m e\n\u001B[1;32m 164\u001B[0m run_manager\u001B[38;5;241m.\u001B[39mon_chain_end(outputs)\n\u001B[1;32m 166\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m include_run_info:\n",
"File \u001B[0;32m~/code/langchain/libs/langchain/langchain/chains/base.py:153\u001B[0m, in \u001B[0;36mChain.invoke\u001B[0;34m(self, input, config, **kwargs)\u001B[0m\n\u001B[1;32m 150\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[1;32m 151\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_validate_inputs(inputs)\n\u001B[1;32m 152\u001B[0m outputs \u001B[38;5;241m=\u001B[39m (\n\u001B[0;32m--> 153\u001B[0m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_call\u001B[49m\u001B[43m(\u001B[49m\u001B[43minputs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mrun_manager\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mrun_manager\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 154\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m new_arg_supported\n\u001B[1;32m 155\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_call(inputs)\n\u001B[1;32m 156\u001B[0m )\n\u001B[1;32m 158\u001B[0m final_outputs: Dict[\u001B[38;5;28mstr\u001B[39m, Any] \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mprep_outputs(\n\u001B[1;32m 159\u001B[0m inputs, outputs, return_only_outputs\n\u001B[1;32m 160\u001B[0m )\n\u001B[1;32m 161\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m \u001B[38;5;167;01mBaseException\u001B[39;00m \u001B[38;5;28;01mas\u001B[39;00m e:\n",
"File \u001B[0;32m~/code/langchain/libs/langchain/langchain/agents/agent.py:1432\u001B[0m, in \u001B[0;36mAgentExecutor._call\u001B[0;34m(self, inputs, run_manager)\u001B[0m\n\u001B[1;32m 1430\u001B[0m \u001B[38;5;66;03m# We now enter the agent loop (until it returns something).\u001B[39;00m\n\u001B[1;32m 1431\u001B[0m \u001B[38;5;28;01mwhile\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_should_continue(iterations, time_elapsed):\n\u001B[0;32m-> 1432\u001B[0m next_step_output \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_take_next_step\u001B[49m\u001B[43m(\u001B[49m\n\u001B[1;32m 1433\u001B[0m \u001B[43m \u001B[49m\u001B[43mname_to_tool_map\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 1434\u001B[0m \u001B[43m \u001B[49m\u001B[43mcolor_mapping\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 1435\u001B[0m \u001B[43m \u001B[49m\u001B[43minputs\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 1436\u001B[0m \u001B[43m \u001B[49m\u001B[43mintermediate_steps\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 1437\u001B[0m \u001B[43m \u001B[49m\u001B[43mrun_manager\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mrun_manager\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 1438\u001B[0m \u001B[43m \u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 1439\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28misinstance\u001B[39m(next_step_output, AgentFinish):\n\u001B[1;32m 1440\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_return(\n\u001B[1;32m 1441\u001B[0m next_step_output, intermediate_steps, run_manager\u001B[38;5;241m=\u001B[39mrun_manager\n\u001B[1;32m 1442\u001B[0m )\n",
"File \u001B[0;32m~/code/langchain/libs/langchain/langchain/agents/agent.py:1138\u001B[0m, in \u001B[0;36mAgentExecutor._take_next_step\u001B[0;34m(self, name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager)\u001B[0m\n\u001B[1;32m 1129\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21m_take_next_step\u001B[39m(\n\u001B[1;32m 1130\u001B[0m \u001B[38;5;28mself\u001B[39m,\n\u001B[1;32m 1131\u001B[0m name_to_tool_map: Dict[\u001B[38;5;28mstr\u001B[39m, BaseTool],\n\u001B[0;32m (...)\u001B[0m\n\u001B[1;32m 1135\u001B[0m run_manager: Optional[CallbackManagerForChainRun] \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;01mNone\u001B[39;00m,\n\u001B[1;32m 1136\u001B[0m ) \u001B[38;5;241m-\u001B[39m\u001B[38;5;241m>\u001B[39m Union[AgentFinish, List[Tuple[AgentAction, \u001B[38;5;28mstr\u001B[39m]]]:\n\u001B[1;32m 1137\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_consume_next_step(\n\u001B[0;32m-> 1138\u001B[0m [\n\u001B[1;32m 1139\u001B[0m a\n\u001B[1;32m 1140\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m a \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_iter_next_step(\n\u001B[1;32m 1141\u001B[0m name_to_tool_map,\n\u001B[1;32m 1142\u001B[0m color_mapping,\n\u001B[1;32m 1143\u001B[0m inputs,\n\u001B[1;32m 1144\u001B[0m intermediate_steps,\n\u001B[1;32m 1145\u001B[0m run_manager,\n\u001B[1;32m 1146\u001B[0m )\n\u001B[1;32m 1147\u001B[0m ]\n\u001B[1;32m 1148\u001B[0m )\n",
"File \u001B[0;32m~/code/langchain/libs/langchain/langchain/agents/agent.py:1138\u001B[0m, in \u001B[0;36m<listcomp>\u001B[0;34m(.0)\u001B[0m\n\u001B[1;32m 1129\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21m_take_next_step\u001B[39m(\n\u001B[1;32m 1130\u001B[0m \u001B[38;5;28mself\u001B[39m,\n\u001B[1;32m 1131\u001B[0m name_to_tool_map: Dict[\u001B[38;5;28mstr\u001B[39m, BaseTool],\n\u001B[0;32m (...)\u001B[0m\n\u001B[1;32m 1135\u001B[0m run_manager: Optional[CallbackManagerForChainRun] \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;01mNone\u001B[39;00m,\n\u001B[1;32m 1136\u001B[0m ) \u001B[38;5;241m-\u001B[39m\u001B[38;5;241m>\u001B[39m Union[AgentFinish, List[Tuple[AgentAction, \u001B[38;5;28mstr\u001B[39m]]]:\n\u001B[1;32m 1137\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_consume_next_step(\n\u001B[0;32m-> 1138\u001B[0m [\n\u001B[1;32m 1139\u001B[0m a\n\u001B[1;32m 1140\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m a \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_iter_next_step(\n\u001B[1;32m 1141\u001B[0m name_to_tool_map,\n\u001B[1;32m 1142\u001B[0m color_mapping,\n\u001B[1;32m 1143\u001B[0m inputs,\n\u001B[1;32m 1144\u001B[0m intermediate_steps,\n\u001B[1;32m 1145\u001B[0m run_manager,\n\u001B[1;32m 1146\u001B[0m )\n\u001B[1;32m 1147\u001B[0m ]\n\u001B[1;32m 1148\u001B[0m )\n",
"File \u001B[0;32m~/code/langchain/libs/langchain/langchain/agents/agent.py:1223\u001B[0m, in \u001B[0;36mAgentExecutor._iter_next_step\u001B[0;34m(self, name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager)\u001B[0m\n\u001B[1;32m 1221\u001B[0m \u001B[38;5;28;01myield\u001B[39;00m agent_action\n\u001B[1;32m 1222\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m agent_action \u001B[38;5;129;01min\u001B[39;00m actions:\n\u001B[0;32m-> 1223\u001B[0m \u001B[38;5;28;01myield\u001B[39;00m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_perform_agent_action\u001B[49m\u001B[43m(\u001B[49m\n\u001B[1;32m 1224\u001B[0m \u001B[43m \u001B[49m\u001B[43mname_to_tool_map\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mcolor_mapping\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43magent_action\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mrun_manager\u001B[49m\n\u001B[1;32m 1225\u001B[0m \u001B[43m \u001B[49m\u001B[43m)\u001B[49m\n",
"File \u001B[0;32m~/code/langchain/libs/langchain/langchain/agents/agent.py:1245\u001B[0m, in \u001B[0;36mAgentExecutor._perform_agent_action\u001B[0;34m(self, name_to_tool_map, color_mapping, agent_action, run_manager)\u001B[0m\n\u001B[1;32m 1243\u001B[0m tool_run_kwargs[\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mllm_prefix\u001B[39m\u001B[38;5;124m\"\u001B[39m] \u001B[38;5;241m=\u001B[39m \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[1;32m 1244\u001B[0m \u001B[38;5;66;03m# We then call the tool on the tool input to get an observation\u001B[39;00m\n\u001B[0;32m-> 1245\u001B[0m observation \u001B[38;5;241m=\u001B[39m \u001B[43mtool\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mrun\u001B[49m\u001B[43m(\u001B[49m\n\u001B[1;32m 1246\u001B[0m \u001B[43m \u001B[49m\u001B[43magent_action\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mtool_input\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 1247\u001B[0m \u001B[43m \u001B[49m\u001B[43mverbose\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mverbose\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 1248\u001B[0m \u001B[43m \u001B[49m\u001B[43mcolor\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mcolor\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 1249\u001B[0m \u001B[43m \u001B[49m\u001B[43mcallbacks\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mrun_manager\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mget_child\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43;01mif\u001B[39;49;00m\u001B[43m \u001B[49m\u001B[43mrun_manager\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43;01melse\u001B[39;49;00m\u001B[43m \u001B[49m\u001B[38;5;28;43;01mNone\u001B[39;49;00m\u001B[43m,\u001B[49m\n\u001B[1;32m 1250\u001B[0m \u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mtool_run_kwargs\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 1251\u001B[0m \u001B[43m \u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 1252\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[1;32m 1253\u001B[0m tool_run_kwargs \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39magent\u001B[38;5;241m.\u001B[39mtool_run_logging_kwargs()\n",
"File \u001B[0;32m~/code/langchain/libs/core/langchain_core/tools.py:422\u001B[0m, in \u001B[0;36mBaseTool.run\u001B[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, **kwargs)\u001B[0m\n\u001B[1;32m 420\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m (\u001B[38;5;167;01mException\u001B[39;00m, \u001B[38;5;167;01mKeyboardInterrupt\u001B[39;00m) \u001B[38;5;28;01mas\u001B[39;00m e:\n\u001B[1;32m 421\u001B[0m run_manager\u001B[38;5;241m.\u001B[39mon_tool_error(e)\n\u001B[0;32m--> 422\u001B[0m \u001B[38;5;28;01mraise\u001B[39;00m e\n\u001B[1;32m 423\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[1;32m 424\u001B[0m run_manager\u001B[38;5;241m.\u001B[39mon_tool_end(observation, color\u001B[38;5;241m=\u001B[39mcolor, name\u001B[38;5;241m=\u001B[39m\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mname, \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwargs)\n",
"File \u001B[0;32m~/code/langchain/libs/core/langchain_core/tools.py:381\u001B[0m, in \u001B[0;36mBaseTool.run\u001B[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, **kwargs)\u001B[0m\n\u001B[1;32m 378\u001B[0m parsed_input \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_parse_input(tool_input)\n\u001B[1;32m 379\u001B[0m tool_args, tool_kwargs \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_to_args_and_kwargs(parsed_input)\n\u001B[1;32m 380\u001B[0m observation \u001B[38;5;241m=\u001B[39m (\n\u001B[0;32m--> 381\u001B[0m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_run\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mtool_args\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mrun_manager\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mrun_manager\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mtool_kwargs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 382\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m new_arg_supported\n\u001B[1;32m 383\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_run(\u001B[38;5;241m*\u001B[39mtool_args, \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mtool_kwargs)\n\u001B[1;32m 384\u001B[0m )\n\u001B[1;32m 385\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m ValidationError \u001B[38;5;28;01mas\u001B[39;00m e:\n\u001B[1;32m 386\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mhandle_validation_error:\n",
"File \u001B[0;32m~/code/langchain/libs/core/langchain_core/tools.py:588\u001B[0m, in \u001B[0;36mTool._run\u001B[0;34m(self, run_manager, *args, **kwargs)\u001B[0m\n\u001B[1;32m 579\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mfunc:\n\u001B[1;32m 580\u001B[0m new_argument_supported \u001B[38;5;241m=\u001B[39m signature(\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mfunc)\u001B[38;5;241m.\u001B[39mparameters\u001B[38;5;241m.\u001B[39mget(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mcallbacks\u001B[39m\u001B[38;5;124m\"\u001B[39m)\n\u001B[1;32m 581\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m (\n\u001B[1;32m 582\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mfunc(\n\u001B[1;32m 583\u001B[0m \u001B[38;5;241m*\u001B[39margs,\n\u001B[1;32m 584\u001B[0m callbacks\u001B[38;5;241m=\u001B[39mrun_manager\u001B[38;5;241m.\u001B[39mget_child() \u001B[38;5;28;01mif\u001B[39;00m run_manager \u001B[38;5;28;01melse\u001B[39;00m \u001B[38;5;28;01mNone\u001B[39;00m,\n\u001B[1;32m 585\u001B[0m \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwargs,\n\u001B[1;32m 586\u001B[0m )\n\u001B[1;32m 587\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m new_argument_supported\n\u001B[0;32m--> 588\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mfunc\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 589\u001B[0m )\n\u001B[1;32m 590\u001B[0m \u001B[38;5;28;01mraise\u001B[39;00m \u001B[38;5;167;01mNotImplementedError\u001B[39;00m(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mTool does not support sync\u001B[39m\u001B[38;5;124m\"\u001B[39m)\n",
"File \u001B[0;32m~/code/langchain/libs/community/langchain_community/utilities/google_search.py:94\u001B[0m, in \u001B[0;36mGoogleSearchAPIWrapper.run\u001B[0;34m(self, query)\u001B[0m\n\u001B[1;32m 92\u001B[0m \u001B[38;5;250m\u001B[39m\u001B[38;5;124;03m\"\"\"Run query through GoogleSearch and parse result.\"\"\"\u001B[39;00m\n\u001B[1;32m 93\u001B[0m snippets \u001B[38;5;241m=\u001B[39m []\n\u001B[0;32m---> 94\u001B[0m results \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_google_search_results\u001B[49m\u001B[43m(\u001B[49m\u001B[43mquery\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mnum\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mk\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 95\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mlen\u001B[39m(results) \u001B[38;5;241m==\u001B[39m \u001B[38;5;241m0\u001B[39m:\n\u001B[1;32m 96\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mNo good Google Search Result was found\u001B[39m\u001B[38;5;124m\"\u001B[39m\n",
"File \u001B[0;32m~/code/langchain/libs/community/langchain_community/utilities/google_search.py:62\u001B[0m, in \u001B[0;36mGoogleSearchAPIWrapper._google_search_results\u001B[0;34m(self, search_term, **kwargs)\u001B[0m\n\u001B[1;32m 60\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39msiterestrict:\n\u001B[1;32m 61\u001B[0m cse \u001B[38;5;241m=\u001B[39m cse\u001B[38;5;241m.\u001B[39msiterestrict()\n\u001B[0;32m---> 62\u001B[0m res \u001B[38;5;241m=\u001B[39m \u001B[43mcse\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mlist\u001B[49m\u001B[43m(\u001B[49m\u001B[43mq\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43msearch_term\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mcx\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mgoogle_cse_id\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mexecute\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 63\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m res\u001B[38;5;241m.\u001B[39mget(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mitems\u001B[39m\u001B[38;5;124m\"\u001B[39m, [])\n",
"File \u001B[0;32m~/code/langchain/.venv/lib/python3.10/site-packages/googleapiclient/_helpers.py:130\u001B[0m, in \u001B[0;36mpositional.<locals>.positional_decorator.<locals>.positional_wrapper\u001B[0;34m(*args, **kwargs)\u001B[0m\n\u001B[1;32m 128\u001B[0m \u001B[38;5;28;01melif\u001B[39;00m positional_parameters_enforcement \u001B[38;5;241m==\u001B[39m POSITIONAL_WARNING:\n\u001B[1;32m 129\u001B[0m logger\u001B[38;5;241m.\u001B[39mwarning(message)\n\u001B[0;32m--> 130\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mwrapped\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n",
"File \u001B[0;32m~/code/langchain/.venv/lib/python3.10/site-packages/googleapiclient/http.py:923\u001B[0m, in \u001B[0;36mHttpRequest.execute\u001B[0;34m(self, http, num_retries)\u001B[0m\n\u001B[1;32m 920\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mheaders[\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mcontent-length\u001B[39m\u001B[38;5;124m\"\u001B[39m] \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mstr\u001B[39m(\u001B[38;5;28mlen\u001B[39m(\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mbody))\n\u001B[1;32m 922\u001B[0m \u001B[38;5;66;03m# Handle retries for server-side errors.\u001B[39;00m\n\u001B[0;32m--> 923\u001B[0m resp, content \u001B[38;5;241m=\u001B[39m \u001B[43m_retry_request\u001B[49m\u001B[43m(\u001B[49m\n\u001B[1;32m 924\u001B[0m \u001B[43m \u001B[49m\u001B[43mhttp\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 925\u001B[0m \u001B[43m \u001B[49m\u001B[43mnum_retries\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 926\u001B[0m \u001B[43m \u001B[49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[38;5;124;43mrequest\u001B[39;49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[43m,\u001B[49m\n\u001B[1;32m 927\u001B[0m \u001B[43m \u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_sleep\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 928\u001B[0m \u001B[43m \u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_rand\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 929\u001B[0m \u001B[43m \u001B[49m\u001B[38;5;28;43mstr\u001B[39;49m\u001B[43m(\u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43muri\u001B[49m\u001B[43m)\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 930\u001B[0m \u001B[43m \u001B[49m\u001B[43mmethod\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43mstr\u001B[39;49m\u001B[43m(\u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mmethod\u001B[49m\u001B[43m)\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 931\u001B[0m \u001B[43m \u001B[49m\u001B[43mbody\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mbody\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 932\u001B[0m \u001B[43m \u001B[49m\u001B[43mheaders\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mheaders\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 933\u001B[0m \u001B[43m\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 935\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m callback \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mresponse_callbacks:\n\u001B[1;32m 936\u001B[0m callback(resp)\n",
"File \u001B[0;32m~/code/langchain/.venv/lib/python3.10/site-packages/googleapiclient/http.py:191\u001B[0m, in \u001B[0;36m_retry_request\u001B[0;34m(http, num_retries, req_type, sleep, rand, uri, method, *args, **kwargs)\u001B[0m\n\u001B[1;32m 189\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[1;32m 190\u001B[0m exception \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;01mNone\u001B[39;00m\n\u001B[0;32m--> 191\u001B[0m resp, content \u001B[38;5;241m=\u001B[39m \u001B[43mhttp\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mrequest\u001B[49m\u001B[43m(\u001B[49m\u001B[43muri\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mmethod\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 192\u001B[0m \u001B[38;5;66;03m# Retry on SSL errors and socket timeout errors.\u001B[39;00m\n\u001B[1;32m 193\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m _ssl_SSLError \u001B[38;5;28;01mas\u001B[39;00m ssl_error:\n",
"File \u001B[0;32m~/code/langchain/.venv/lib/python3.10/site-packages/httplib2/__init__.py:1724\u001B[0m, in \u001B[0;36mHttp.request\u001B[0;34m(self, uri, method, body, headers, redirections, connection_type)\u001B[0m\n\u001B[1;32m 1722\u001B[0m content \u001B[38;5;241m=\u001B[39m \u001B[38;5;124mb\u001B[39m\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[1;32m 1723\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[0;32m-> 1724\u001B[0m (response, content) \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_request\u001B[49m\u001B[43m(\u001B[49m\n\u001B[1;32m 1725\u001B[0m \u001B[43m \u001B[49m\u001B[43mconn\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mauthority\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43muri\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mrequest_uri\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mmethod\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mbody\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mheaders\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mredirections\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mcachekey\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 1726\u001B[0m \u001B[43m \u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 1727\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m \u001B[38;5;167;01mException\u001B[39;00m \u001B[38;5;28;01mas\u001B[39;00m e:\n\u001B[1;32m 1728\u001B[0m is_timeout \u001B[38;5;241m=\u001B[39m \u001B[38;5;28misinstance\u001B[39m(e, socket\u001B[38;5;241m.\u001B[39mtimeout)\n",
"File \u001B[0;32m~/code/langchain/.venv/lib/python3.10/site-packages/httplib2/__init__.py:1444\u001B[0m, in \u001B[0;36mHttp._request\u001B[0;34m(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey)\u001B[0m\n\u001B[1;32m 1441\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m auth:\n\u001B[1;32m 1442\u001B[0m auth\u001B[38;5;241m.\u001B[39mrequest(method, request_uri, headers, body)\n\u001B[0;32m-> 1444\u001B[0m (response, content) \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_conn_request\u001B[49m\u001B[43m(\u001B[49m\u001B[43mconn\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mrequest_uri\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mmethod\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mbody\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mheaders\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 1446\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m auth:\n\u001B[1;32m 1447\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m auth\u001B[38;5;241m.\u001B[39mresponse(response, body):\n",
"File \u001B[0;32m~/code/langchain/.venv/lib/python3.10/site-packages/httplib2/__init__.py:1366\u001B[0m, in \u001B[0;36mHttp._conn_request\u001B[0;34m(self, conn, request_uri, method, body, headers)\u001B[0m\n\u001B[1;32m 1364\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[1;32m 1365\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m conn\u001B[38;5;241m.\u001B[39msock \u001B[38;5;129;01mis\u001B[39;00m \u001B[38;5;28;01mNone\u001B[39;00m:\n\u001B[0;32m-> 1366\u001B[0m \u001B[43mconn\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mconnect\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 1367\u001B[0m conn\u001B[38;5;241m.\u001B[39mrequest(method, request_uri, body, headers)\n\u001B[1;32m 1368\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m socket\u001B[38;5;241m.\u001B[39mtimeout:\n",
"File \u001B[0;32m~/code/langchain/.venv/lib/python3.10/site-packages/httplib2/__init__.py:1156\u001B[0m, in \u001B[0;36mHTTPSConnectionWithTimeout.connect\u001B[0;34m(self)\u001B[0m\n\u001B[1;32m 1154\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m has_timeout(\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mtimeout):\n\u001B[1;32m 1155\u001B[0m sock\u001B[38;5;241m.\u001B[39msettimeout(\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mtimeout)\n\u001B[0;32m-> 1156\u001B[0m \u001B[43msock\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mconnect\u001B[49m\u001B[43m(\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mhost\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mport\u001B[49m\u001B[43m)\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 1158\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39msock \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_context\u001B[38;5;241m.\u001B[39mwrap_socket(sock, server_hostname\u001B[38;5;241m=\u001B[39m\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mhost)\n\u001B[1;32m 1160\u001B[0m \u001B[38;5;66;03m# Python 3.3 compatibility: emulate the check_hostname behavior\u001B[39;00m\n",
"\u001B[0;31mKeyboardInterrupt\u001B[0m: "
]
}
],
"source": [
"agent_executor.invoke({\"input\": \"What is ChatGPT?\"})"
"agent_chain.run(input=\"What is ChatGPT?\")"
]
},
{
@@ -196,15 +179,15 @@
"text": [
"\n",
"\n",
"\u001B[1m> Entering new AgentExecutor chain...\u001B[0m\n",
"\u001B[32;1m\u001B[1;3mThought: I need to find out who developed ChatGPT\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: I need to find out who developed ChatGPT\n",
"Action: Search\n",
"Action Input: Who developed ChatGPT\u001B[0m\n",
"Observation: \u001B[36;1m\u001B[1;3mChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large ... Feb 15, 2023 ... Who owns Chat GPT? Chat GPT is owned and developed by AI research and deployment company, OpenAI. The organization is headquartered in San ... Feb 8, 2023 ... ChatGPT is an AI chatbot developed by San Francisco-based startup OpenAI. OpenAI was co-founded in 2015 by Elon Musk and Sam Altman and is ... Dec 7, 2022 ... ChatGPT is an AI chatbot designed and developed by OpenAI. The bot works by generating text responses based on human-user input, like questions ... Jan 12, 2023 ... In 2019, Microsoft invested $1 billion in OpenAI, the tiny San Francisco company that designed ChatGPT. And in the years since, it has quietly ... Jan 25, 2023 ... The inside story of ChatGPT: How OpenAI founder Sam Altman built the world's hottest technology with billions from Microsoft. Dec 3, 2022 ... ChatGPT went viral on social media for its ability to do anything from code to write essays. · The company that created the AI chatbot has a ... Jan 17, 2023 ... While many Americans were nursing hangovers on New Year's Day, 22-year-old Edward Tian was working feverishly on a new app to combat misuse ... ChatGPT is a language model created by OpenAI, an artificial intelligence research laboratory consisting of a team of researchers and engineers focused on ... 1 day ago ... Everyone is talking about ChatGPT, developed by OpenAI. This is such a great tool that has helped to make AI more accessible to a wider ...\u001B[0m\n",
"Thought:\u001B[32;1m\u001B[1;3m I now know the final answer\n",
"Final Answer: ChatGPT was developed by OpenAI.\u001B[0m\n",
"Action Input: Who developed ChatGPT\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3mChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large ... Feb 15, 2023 ... Who owns Chat GPT? Chat GPT is owned and developed by AI research and deployment company, OpenAI. The organization is headquartered in San ... Feb 8, 2023 ... ChatGPT is an AI chatbot developed by San Francisco-based startup OpenAI. OpenAI was co-founded in 2015 by Elon Musk and Sam Altman and is ... Dec 7, 2022 ... ChatGPT is an AI chatbot designed and developed by OpenAI. The bot works by generating text responses based on human-user input, like questions ... Jan 12, 2023 ... In 2019, Microsoft invested $1 billion in OpenAI, the tiny San Francisco company that designed ChatGPT. And in the years since, it has quietly ... Jan 25, 2023 ... The inside story of ChatGPT: How OpenAI founder Sam Altman built the world's hottest technology with billions from Microsoft. Dec 3, 2022 ... ChatGPT went viral on social media for its ability to do anything from code to write essays. · The company that created the AI chatbot has a ... Jan 17, 2023 ... While many Americans were nursing hangovers on New Year's Day, 22-year-old Edward Tian was working feverishly on a new app to combat misuse ... ChatGPT is a language model created by OpenAI, an artificial intelligence research laboratory consisting of a team of researchers and engineers focused on ... 1 day ago ... Everyone is talking about ChatGPT, developed by OpenAI. This is such a great tool that has helped to make AI more accessible to a wider ...\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: ChatGPT was developed by OpenAI.\u001b[0m\n",
"\n",
"\u001B[1m> Finished chain.\u001B[0m\n"
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
@@ -219,7 +202,7 @@
}
],
"source": [
"agent_executor.invoke({\"input\": \"Who developed it?\"})"
"agent_chain.run(input=\"Who developed it?\")"
]
},
{
@@ -234,14 +217,14 @@
"text": [
"\n",
"\n",
"\u001B[1m> Entering new AgentExecutor chain...\u001B[0m\n",
"\u001B[32;1m\u001B[1;3mThought: I need to simplify the conversation for a 5 year old.\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: I need to simplify the conversation for a 5 year old.\n",
"Action: Summary\n",
"Action Input: My daughter 5 years old\u001B[0m\n",
"Action Input: My daughter 5 years old\u001b[0m\n",
"\n",
"\u001B[1m> Entering new LLMChain chain...\u001B[0m\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001B[32;1m\u001B[1;3mThis is a conversation between a human and a bot:\n",
"\u001b[32;1m\u001b[1;3mThis is a conversation between a human and a bot:\n",
"\n",
"Human: What is ChatGPT?\n",
"AI: ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large language models and is optimized for dialogue by using Reinforcement Learning with Human-in-the-Loop. It is also capable of sending and receiving images during chatting.\n",
@@ -249,16 +232,16 @@
"AI: ChatGPT was developed by OpenAI.\n",
"\n",
"Write a summary of the conversation for My daughter 5 years old:\n",
"\u001B[0m\n",
"\u001b[0m\n",
"\n",
"\u001B[1m> Finished chain.\u001B[0m\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"\n",
"Observation: \u001B[33;1m\u001B[1;3m\n",
"The conversation was about ChatGPT, an artificial intelligence chatbot. It was created by OpenAI and can send and receive images while chatting.\u001B[0m\n",
"Thought:\u001B[32;1m\u001B[1;3m I now know the final answer.\n",
"Final Answer: ChatGPT is an artificial intelligence chatbot created by OpenAI that can send and receive images while chatting.\u001B[0m\n",
"Observation: \u001b[33;1m\u001b[1;3m\n",
"The conversation was about ChatGPT, an artificial intelligence chatbot. It was created by OpenAI and can send and receive images while chatting.\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer.\n",
"Final Answer: ChatGPT is an artificial intelligence chatbot created by OpenAI that can send and receive images while chatting.\u001b[0m\n",
"\n",
"\u001B[1m> Finished chain.\u001B[0m\n"
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
@@ -273,8 +256,8 @@
}
],
"source": [
"agent_executor.invoke(\n",
"    {\"input\": \"Thanks. Summarize the conversation, for my daughter 5 years old.\"}\n",
"agent_chain.run(\n",
"    input=\"Thanks. Summarize the conversation, for my daughter 5 years old.\"\n",
")"
]
},
@@ -306,17 +289,9 @@
}
],
"source": [
"print(agent_executor.memory.buffer)"
"print(agent_chain.memory.buffer)"
]
},
{
"cell_type": "markdown",
"id": "84ca95c30e262e00",
"metadata": {
"collapsed": false
},
"source": []
},
{
"cell_type": "markdown",
"id": "cc3d0aa4",
@@ -365,9 +340,25 @@
"    ),\n",
"]\n",
"\n",
"prompt = hub.pull(\"hwchase17/react\")\n",
"agent = create_react_agent(model, tools, prompt)\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, memory=memory)"
"prefix = \"\"\"Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:\"\"\"\n",
"suffix = \"\"\"Begin!\"\n",
"\n",
"{chat_history}\n",
"Question: {input}\n",
"{agent_scratchpad}\"\"\"\n",
"\n",
"prompt = ZeroShotAgent.create_prompt(\n",
"    tools,\n",
"    prefix=prefix,\n",
"    suffix=suffix,\n",
"    input_variables=[\"input\", \"chat_history\", \"agent_scratchpad\"],\n",
")\n",
"\n",
"llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)\n",
"agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)\n",
"agent_chain = AgentExecutor.from_agent_and_tools(\n",
"    agent=agent, tools=tools, verbose=True, memory=memory\n",
")"
]
},
{
@@ -382,15 +373,15 @@
"text": [
"\n",
"\n",
"\u001B[1m> Entering new AgentExecutor chain...\u001B[0m\n",
"\u001B[32;1m\u001B[1;3mThought: I should research ChatGPT to answer this question.\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: I should research ChatGPT to answer this question.\n",
"Action: Search\n",
"Action Input: \"ChatGPT\"\u001B[0m\n",
"Observation: \u001B[36;1m\u001B[1;3mNov 30, 2022 ... We've trained a model called ChatGPT which interacts in a conversational way. The dialogue format makes it possible for ChatGPT to answer ... ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large ... ChatGPT. We've trained a model called ChatGPT which interacts in a conversational way. The dialogue format makes it possible for ChatGPT to answer ... Feb 2, 2023 ... ChatGPT, the popular chatbot from OpenAI, is estimated to have reached 100 million monthly active users in January, just two months after ... 2 days ago ... ChatGPT recently launched a new version of its own plagiarism detection tool, with hopes that it will squelch some of the criticism around how ... An API for accessing new AI models developed by OpenAI. Feb 19, 2023 ... ChatGPT is an AI chatbot system that OpenAI released in November to show off and test what a very large, powerful AI system can accomplish. You ... ChatGPT is fine-tuned from GPT-3.5, a language model trained to produce text. ChatGPT was optimized for dialogue by using Reinforcement Learning with Human ... 3 days ago ... Visual ChatGPT connects ChatGPT and a series of Visual Foundation Models to enable sending and receiving images during chatting. Dec 1, 2022 ... ChatGPT is a natural language processing tool driven by AI technology that allows you to have human-like conversations and much more with a ...\u001B[0m\n",
"Thought:\u001B[32;1m\u001B[1;3m I now know the final answer.\n",
"Final Answer: ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large language models and is optimized for dialogue by using Reinforcement Learning with Human-in-the-Loop. It is also capable of sending and receiving images during chatting.\u001B[0m\n",
"Action Input: \"ChatGPT\"\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3mNov 30, 2022 ... We've trained a model called ChatGPT which interacts in a conversational way. The dialogue format makes it possible for ChatGPT to answer ... ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large ... ChatGPT. We've trained a model called ChatGPT which interacts in a conversational way. The dialogue format makes it possible for ChatGPT to answer ... Feb 2, 2023 ... ChatGPT, the popular chatbot from OpenAI, is estimated to have reached 100 million monthly active users in January, just two months after ... 2 days ago ... ChatGPT recently launched a new version of its own plagiarism detection tool, with hopes that it will squelch some of the criticism around how ... An API for accessing new AI models developed by OpenAI. Feb 19, 2023 ... ChatGPT is an AI chatbot system that OpenAI released in November to show off and test what a very large, powerful AI system can accomplish. You ... ChatGPT is fine-tuned from GPT-3.5, a language model trained to produce text. ChatGPT was optimized for dialogue by using Reinforcement Learning with Human ... 3 days ago ... Visual ChatGPT connects ChatGPT and a series of Visual Foundation Models to enable sending and receiving images during chatting. Dec 1, 2022 ... ChatGPT is a natural language processing tool driven by AI technology that allows you to have human-like conversations and much more with a ...\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer.\n",
"Final Answer: ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large language models and is optimized for dialogue by using Reinforcement Learning with Human-in-the-Loop. It is also capable of sending and receiving images during chatting.\u001b[0m\n",
"\n",
"\u001B[1m> Finished chain.\u001B[0m\n"
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
@@ -405,7 +396,7 @@
}
],
"source": [
"agent_executor.invoke({\"input\": \"What is ChatGPT?\"})"
"agent_chain.run(input=\"What is ChatGPT?\")"
]
},
{
@@ -420,15 +411,15 @@
"text": [
"\n",
"\n",
"\u001B[1m> Entering new AgentExecutor chain...\u001B[0m\n",
"\u001B[32;1m\u001B[1;3mThought: I need to find out who developed ChatGPT\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: I need to find out who developed ChatGPT\n",
"Action: Search\n",
"Action Input: Who developed ChatGPT\u001B[0m\n",
"Observation: \u001B[36;1m\u001B[1;3mChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large ... Feb 15, 2023 ... Who owns Chat GPT? Chat GPT is owned and developed by AI research and deployment company, OpenAI. The organization is headquartered in San ... Feb 8, 2023 ... ChatGPT is an AI chatbot developed by San Francisco-based startup OpenAI. OpenAI was co-founded in 2015 by Elon Musk and Sam Altman and is ... Dec 7, 2022 ... ChatGPT is an AI chatbot designed and developed by OpenAI. The bot works by generating text responses based on human-user input, like questions ... Jan 12, 2023 ... In 2019, Microsoft invested $1 billion in OpenAI, the tiny San Francisco company that designed ChatGPT. And in the years since, it has quietly ... Jan 25, 2023 ... The inside story of ChatGPT: How OpenAI founder Sam Altman built the world's hottest technology with billions from Microsoft. Dec 3, 2022 ... ChatGPT went viral on social media for its ability to do anything from code to write essays. · The company that created the AI chatbot has a ... Jan 17, 2023 ... While many Americans were nursing hangovers on New Year's Day, 22-year-old Edward Tian was working feverishly on a new app to combat misuse ... ChatGPT is a language model created by OpenAI, an artificial intelligence research laboratory consisting of a team of researchers and engineers focused on ... 1 day ago ... Everyone is talking about ChatGPT, developed by OpenAI. This is such a great tool that has helped to make AI more accessible to a wider ...\u001B[0m\n",
"Thought:\u001B[32;1m\u001B[1;3m I now know the final answer\n",
"Final Answer: ChatGPT was developed by OpenAI.\u001B[0m\n",
"Action Input: Who developed ChatGPT\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3mChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large ... Feb 15, 2023 ... Who owns Chat GPT? Chat GPT is owned and developed by AI research and deployment company, OpenAI. The organization is headquartered in San ... Feb 8, 2023 ... ChatGPT is an AI chatbot developed by San Francisco-based startup OpenAI. OpenAI was co-founded in 2015 by Elon Musk and Sam Altman and is ... Dec 7, 2022 ... ChatGPT is an AI chatbot designed and developed by OpenAI. The bot works by generating text responses based on human-user input, like questions ... Jan 12, 2023 ... In 2019, Microsoft invested $1 billion in OpenAI, the tiny San Francisco company that designed ChatGPT. And in the years since, it has quietly ... Jan 25, 2023 ... The inside story of ChatGPT: How OpenAI founder Sam Altman built the world's hottest technology with billions from Microsoft. Dec 3, 2022 ... ChatGPT went viral on social media for its ability to do anything from code to write essays. · The company that created the AI chatbot has a ... Jan 17, 2023 ... While many Americans were nursing hangovers on New Year's Day, 22-year-old Edward Tian was working feverishly on a new app to combat misuse ... ChatGPT is a language model created by OpenAI, an artificial intelligence research laboratory consisting of a team of researchers and engineers focused on ... 1 day ago ... Everyone is talking about ChatGPT, developed by OpenAI. This is such a great tool that has helped to make AI more accessible to a wider ...\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: ChatGPT was developed by OpenAI.\u001b[0m\n",
"\n",
"\u001B[1m> Finished chain.\u001B[0m\n"
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
@@ -443,7 +434,7 @@
}
],
"source": [
"agent_executor.invoke({\"input\": \"Who developed it?\"})"
"agent_chain.run(input=\"Who developed it?\")"
]
},
{
@@ -458,14 +449,14 @@
"text": [
"\n",
"\n",
"\u001B[1m> Entering new AgentExecutor chain...\u001B[0m\n",
"\u001B[32;1m\u001B[1;3mThought: I need to simplify the conversation for a 5 year old.\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: I need to simplify the conversation for a 5 year old.\n",
"Action: Summary\n",
"Action Input: My daughter 5 years old\u001B[0m\n",
"Action Input: My daughter 5 years old\u001b[0m\n",
"\n",
"\u001B[1m> Entering new LLMChain chain...\u001B[0m\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001B[32;1m\u001B[1;3mThis is a conversation between a human and a bot:\n",
"\u001b[32;1m\u001b[1;3mThis is a conversation between a human and a bot:\n",
"\n",
"Human: What is ChatGPT?\n",
"AI: ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large language models and is optimized for dialogue by using Reinforcement Learning with Human-in-the-Loop. It is also capable of sending and receiving images during chatting.\n",
|
||||
@@ -473,16 +464,16 @@
|
||||
"AI: ChatGPT was developed by OpenAI.\n",
|
||||
"\n",
|
||||
"Write a summary of the conversation for My daughter 5 years old:\n",
|
||||
"\u001B[0m\n",
|
||||
"\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001B[1m> Finished chain.\u001B[0m\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"Observation: \u001B[33;1m\u001B[1;3m\n",
|
||||
"The conversation was about ChatGPT, an artificial intelligence chatbot developed by OpenAI. It is designed to have conversations with humans and can also send and receive images.\u001B[0m\n",
|
||||
"Thought:\u001B[32;1m\u001B[1;3m I now know the final answer.\n",
|
||||
"Final Answer: ChatGPT is an artificial intelligence chatbot developed by OpenAI that can have conversations with humans and send and receive images.\u001B[0m\n",
|
||||
"Observation: \u001b[33;1m\u001b[1;3m\n",
|
||||
"The conversation was about ChatGPT, an artificial intelligence chatbot developed by OpenAI. It is designed to have conversations with humans and can also send and receive images.\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer.\n",
|
||||
"Final Answer: ChatGPT is an artificial intelligence chatbot developed by OpenAI that can have conversations with humans and send and receive images.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001B[1m> Finished chain.\u001B[0m\n"
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -497,8 +488,8 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent_executor.invoke(\n",
|
||||
" {\"input\": \"Thanks. Summarize the conversation, for my daughter 5 years old.\"}\n",
|
||||
"agent_chain.run(\n",
|
||||
" input=\"Thanks. Summarize the conversation, for my daughter 5 years old.\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@@ -533,7 +524,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(agent_executor.memory.buffer)"
|
||||
"print(agent_chain.memory.buffer)"
|
||||
]
|
||||
}
|
||||
],
@@ -647,7 +647,7 @@ Sometimes you may not have the luxury of using OpenAI or other service-hosted la
|
||||
import logging
|
||||
import torch
|
||||
from transformers import AutoTokenizer, GPT2TokenizerFast, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM
|
||||
from langchain_huggingface import HuggingFacePipeline
|
||||
from langchain_community.llms import HuggingFacePipeline
|
||||
|
||||
# Note: This model requires a large GPU, e.g. an 80GB A100. See documentation for other ways to run private non-OpenAI models.
|
||||
model_id = "google/flan-ul2"
|
||||
@@ -992,7 +992,7 @@ Now that you have some examples (with manually corrected output SQL), you can do
|
||||
```python
|
||||
from langchain.prompts import FewShotPromptTemplate, PromptTemplate
|
||||
from langchain.chains.sql_database.prompt import _sqlite_prompt, PROMPT_SUFFIX
|
||||
from langchain_huggingface import HuggingFaceEmbeddings
|
||||
from langchain_community.embeddings.huggingface import HuggingFaceEmbeddings
|
||||
from langchain.prompts.example_selector.semantic_similarity import SemanticSimilarityExampleSelector
|
||||
from langchain_community.vectorstores import Chroma
@@ -9,7 +9,7 @@
|
||||
" \n",
|
||||
"[Together AI](https://python.langchain.com/docs/integrations/llms/together) has a broad set of OSS LLMs via inference API.\n",
|
||||
"\n",
|
||||
"See [here](https://docs.together.ai/docs/inference-models). We use `\"mistralai/Mixtral-8x7B-Instruct-v0.1` for RAG on the Mixtral paper.\n",
|
||||
"See [here](https://api.together.xyz/playground). We use `\"mistralai/Mixtral-8x7B-Instruct-v0.1` for RAG on the Mixtral paper.\n",
|
||||
"\n",
|
||||
"Download the paper:\n",
|
||||
"https://arxiv.org/pdf/2401.04088.pdf"
|
||||
@@ -148,7 +148,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.6"
|
||||
"version": "3.9.16"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
@@ -1,199 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "c48812ed-35bd-4fbe-9a2c-6c7335e5645e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"from langchain_core.runnables import ConfigurableField\n",
|
||||
"from langchain_core.tools import tool\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def multiply(x: float, y: float) -> float:\n",
|
||||
" \"\"\"Multiply 'x' times 'y'.\"\"\"\n",
|
||||
" return x * y\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def exponentiate(x: float, y: float) -> float:\n",
|
||||
" \"\"\"Raise 'x' to the 'y'.\"\"\"\n",
|
||||
" return x**y\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def add(x: float, y: float) -> float:\n",
|
||||
" \"\"\"Add 'x' and 'y'.\"\"\"\n",
|
||||
" return x + y\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"tools = [multiply, exponentiate, add]\n",
|
||||
"\n",
|
||||
"gpt35 = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0).bind_tools(tools)\n",
|
||||
"claude3 = ChatAnthropic(model=\"claude-3-sonnet-20240229\").bind_tools(tools)\n",
|
||||
"llm_with_tools = gpt35.configurable_alternatives(\n",
|
||||
" ConfigurableField(id=\"llm\"), default_key=\"gpt35\", claude3=claude3\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9c186263-1b98-4cb2-b6d1-71f65eb0d811",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# LangGraph"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "28fc2c60-7dbc-428a-8983-1a6a15ea30d2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import operator\n",
|
||||
"from typing import Annotated, Sequence, TypedDict\n",
|
||||
"\n",
|
||||
"from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, ToolMessage\n",
|
||||
"from langchain_core.runnables import RunnableLambda\n",
|
||||
"from langgraph.graph import END, StateGraph\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class AgentState(TypedDict):\n",
|
||||
" messages: Annotated[Sequence[BaseMessage], operator.add]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def should_continue(state):\n",
|
||||
" return \"continue\" if state[\"messages\"][-1].tool_calls else \"end\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def call_model(state, config):\n",
|
||||
" return {\"messages\": [llm_with_tools.invoke(state[\"messages\"], config=config)]}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def _invoke_tool(tool_call):\n",
|
||||
" tool = {tool.name: tool for tool in tools}[tool_call[\"name\"]]\n",
|
||||
" return ToolMessage(tool.invoke(tool_call[\"args\"]), tool_call_id=tool_call[\"id\"])\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"tool_executor = RunnableLambda(_invoke_tool)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def call_tools(state):\n",
|
||||
" last_message = state[\"messages\"][-1]\n",
|
||||
" return {\"messages\": tool_executor.batch(last_message.tool_calls)}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"workflow = StateGraph(AgentState)\n",
|
||||
"workflow.add_node(\"agent\", call_model)\n",
|
||||
"workflow.add_node(\"action\", call_tools)\n",
|
||||
"workflow.set_entry_point(\"agent\")\n",
|
||||
"workflow.add_conditional_edges(\n",
|
||||
" \"agent\",\n",
|
||||
" should_continue,\n",
|
||||
" {\n",
|
||||
" \"continue\": \"action\",\n",
|
||||
" \"end\": END,\n",
|
||||
" },\n",
|
||||
")\n",
|
||||
"workflow.add_edge(\"action\", \"agent\")\n",
|
||||
"graph = workflow.compile()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "3710e724-2595-4625-ba3a-effb81e66e4a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'messages': [HumanMessage(content=\"what's 3 plus 5 raised to the 2.743. also what's 17.24 - 918.1241\"),\n",
|
||||
" AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_6yMU2WsS4Bqgi1WxFHxtfJRc', 'function': {'arguments': '{\"x\": 8, \"y\": 2.743}', 'name': 'exponentiate'}, 'type': 'function'}, {'id': 'call_GAL3dQiKFF9XEV0RrRLPTvVp', 'function': {'arguments': '{\"x\": 17.24, \"y\": -918.1241}', 'name': 'add'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 58, 'prompt_tokens': 168, 'total_tokens': 226}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': 'fp_b28b39ffa8', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-528302fc-7acf-4c11-82c4-119ccf40c573-0', tool_calls=[{'name': 'exponentiate', 'args': {'x': 8, 'y': 2.743}, 'id': 'call_6yMU2WsS4Bqgi1WxFHxtfJRc'}, {'name': 'add', 'args': {'x': 17.24, 'y': -918.1241}, 'id': 'call_GAL3dQiKFF9XEV0RrRLPTvVp'}]),\n",
|
||||
" ToolMessage(content='300.03770462067547', tool_call_id='call_6yMU2WsS4Bqgi1WxFHxtfJRc'),\n",
|
||||
" ToolMessage(content='-900.8841', tool_call_id='call_GAL3dQiKFF9XEV0RrRLPTvVp'),\n",
|
||||
" AIMessage(content='The result of \\\\(3 + 5^{2.743}\\\\) is approximately 300.04, and the result of \\\\(17.24 - 918.1241\\\\) is approximately -900.88.', response_metadata={'token_usage': {'completion_tokens': 44, 'prompt_tokens': 251, 'total_tokens': 295}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': 'fp_b28b39ffa8', 'finish_reason': 'stop', 'logprobs': None}, id='run-d1161669-ed09-4b18-94bd-6d8530df5aa8-0')]}"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"graph.invoke(\n",
|
||||
" {\n",
|
||||
" \"messages\": [\n",
|
||||
" HumanMessage(\n",
|
||||
" \"what's 3 plus 5 raised to the 2.743. also what's 17.24 - 918.1241\"\n",
|
||||
" )\n",
|
||||
" ]\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "073c074e-d722-42e0-85ec-c62c079207e4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'messages': [HumanMessage(content=\"what's 3 plus 5 raised to the 2.743. also what's 17.24 - 918.1241\"),\n",
|
||||
" AIMessage(content=[{'text': \"Okay, let's break this down into two parts:\", 'type': 'text'}, {'id': 'toolu_01DEhqcXkXTtzJAiZ7uMBeDC', 'input': {'x': 3, 'y': 5}, 'name': 'add', 'type': 'tool_use'}], response_metadata={'id': 'msg_01AkLGH8sxMHaH15yewmjwkF', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 450, 'output_tokens': 81}}, id='run-f35bfae8-8ded-4f8a-831b-0940d6ad16b6-0', tool_calls=[{'name': 'add', 'args': {'x': 3, 'y': 5}, 'id': 'toolu_01DEhqcXkXTtzJAiZ7uMBeDC'}]),\n",
|
||||
" ToolMessage(content='8.0', tool_call_id='toolu_01DEhqcXkXTtzJAiZ7uMBeDC'),\n",
|
||||
" AIMessage(content=[{'id': 'toolu_013DyMLrvnrto33peAKMGMr1', 'input': {'x': 8.0, 'y': 2.743}, 'name': 'exponentiate', 'type': 'tool_use'}], response_metadata={'id': 'msg_015Fmp8aztwYcce2JDAFfce3', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 545, 'output_tokens': 75}}, id='run-48aaeeeb-a1e5-48fd-a57a-6c3da2907b47-0', tool_calls=[{'name': 'exponentiate', 'args': {'x': 8.0, 'y': 2.743}, 'id': 'toolu_013DyMLrvnrto33peAKMGMr1'}]),\n",
|
||||
" ToolMessage(content='300.03770462067547', tool_call_id='toolu_013DyMLrvnrto33peAKMGMr1'),\n",
|
||||
" AIMessage(content=[{'text': 'So 3 plus 5 raised to the 2.743 power is 300.04.\\n\\nFor the second part:', 'type': 'text'}, {'id': 'toolu_01UTmMrGTmLpPrPCF1rShN46', 'input': {'x': 17.24, 'y': -918.1241}, 'name': 'add', 'type': 'tool_use'}], response_metadata={'id': 'msg_015TkhfRBENPib2RWAxkieH6', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 638, 'output_tokens': 105}}, id='run-45fb62e3-d102-4159-881d-241c5dbadeed-0', tool_calls=[{'name': 'add', 'args': {'x': 17.24, 'y': -918.1241}, 'id': 'toolu_01UTmMrGTmLpPrPCF1rShN46'}]),\n",
|
||||
" ToolMessage(content='-900.8841', tool_call_id='toolu_01UTmMrGTmLpPrPCF1rShN46'),\n",
|
||||
" AIMessage(content='Therefore, 17.24 - 918.1241 = -900.8841', response_metadata={'id': 'msg_01LgKnRuUcSyADCpxv9tPoYD', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 759, 'output_tokens': 24}}, id='run-1008254e-ccd1-497c-8312-9550dd77bd08-0')]}"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"graph.invoke(\n",
|
||||
" {\n",
|
||||
" \"messages\": [\n",
|
||||
" HumanMessage(\n",
|
||||
" \"what's 3 plus 5 raised to the 2.743. also what's 17.24 - 918.1241\"\n",
|
||||
" )\n",
|
||||
" ]\n",
|
||||
" },\n",
|
||||
" config={\"configurable\": {\"llm\": \"claude3\"}},\n",
|
||||
")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -3811,7 +3811,7 @@
|
||||
"from langchain.chains import ConversationalRetrievalChain\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI(model=\"gpt-3.5-turbo-0613\") # switch to 'gpt-4'\n",
|
||||
"model = ChatOpenAI(model_name=\"gpt-3.5-turbo-0613\") # switch to 'gpt-4'\n",
|
||||
"qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)"
|
||||
]
|
||||
},
@@ -84,7 +84,7 @@
|
||||
" Applies the chatmodel to the message history\n",
|
||||
" and returns the message string\n",
|
||||
" \"\"\"\n",
|
||||
" message = self.model.invoke(\n",
|
||||
" message = self.model(\n",
|
||||
" [\n",
|
||||
" self.system_message,\n",
|
||||
" HumanMessage(content=\"\\n\".join(self.message_history + [self.prefix])),\n",
|
||||
@@ -424,7 +424,7 @@
|
||||
" DialogueAgentWithTools(\n",
|
||||
" name=name,\n",
|
||||
" system_message=SystemMessage(content=system_message),\n",
|
||||
" model=ChatOpenAI(model=\"gpt-4\", temperature=0.2),\n",
|
||||
" model=ChatOpenAI(model_name=\"gpt-4\", temperature=0.2),\n",
|
||||
" tool_names=tools,\n",
|
||||
" top_k_results=2,\n",
|
||||
" )\n",
@@ -70,7 +70,7 @@
|
||||
" Applies the chatmodel to the message history\n",
|
||||
" and returns the message string\n",
|
||||
" \"\"\"\n",
|
||||
" message = self.model.invoke(\n",
|
||||
" message = self.model(\n",
|
||||
" [\n",
|
||||
" self.system_message,\n",
|
||||
" HumanMessage(content=\"\\n\".join(self.message_history + [self.prefix])),\n",
@@ -1,174 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Video Captioning\n",
|
||||
"This notebook shows how to use VideoCaptioningChain, which is implemented using Langchain's ImageCaptionLoader and AssemblyAI to produce .srt files.\n",
|
||||
"\n",
|
||||
"This system autogenerates both subtitles and closed captions from a video URL."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Installing Dependencies"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# !pip install ffmpeg-python\n",
|
||||
"# !pip install assemblyai\n",
|
||||
"# !pip install opencv-python\n",
|
||||
"# !pip install torch\n",
|
||||
"# !pip install pillow\n",
|
||||
"# !pip install transformers\n",
|
||||
"# !pip install langchain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Imports"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-11-30T03:39:14.078232Z",
|
||||
"start_time": "2023-11-30T03:39:12.534410Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"from langchain.chains.video_captioning import VideoCaptioningChain\n",
|
||||
"from langchain.chat_models.openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setting up API Keys"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-11-30T03:39:17.423806Z",
|
||||
"start_time": "2023-11-30T03:39:17.417945Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"OPENAI_API_KEY = getpass.getpass(\"OpenAI API Key:\")\n",
|
||||
"\n",
|
||||
"ASSEMBLYAI_API_KEY = getpass.getpass(\"AssemblyAI API Key:\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Required parameters:**\n",
|
||||
"\n",
|
||||
"* llm: The language model this chain will use to get suggestions on how to refine the closed-captions\n",
|
||||
"* assemblyai_key: The API key for AssemblyAI, used to generate the subtitles\n",
|
||||
"\n",
|
||||
"**Optional Parameters:**\n",
|
||||
"\n",
|
||||
"* verbose (Default: True): Sets verbose mode for downstream chain calls\n",
|
||||
"* use_logging (Default: True): Log the chain's processes in run manager\n",
|
||||
"* frame_skip (Default: None): Choose how many video frames to skip during processing. Increasing it results in faster execution, but less accurate results. If None, frame skip is calculated manually based on the framerate Set this to 0 to sample all frames\n",
|
||||
"* image_delta_threshold (Default: 3000000): Set the sensitivity for what the image processor considers a change in scenery in the video, used to delimit closed captions. Higher = less sensitive\n",
|
||||
"* closed_caption_char_limit (Default: 20): Sets the character limit on closed captions\n",
|
||||
"* closed_caption_similarity_threshold (Default: 80): Sets the percentage value to how similar two closed caption models should be in order to be clustered into one longer closed caption\n",
|
||||
"* use_unclustered_video_models (Default: False): If true, closed captions that could not be clustered will be included. May result in spontaneous behaviour from closed captions such as very short lasting captions or fast-changing captions. Enabling this is experimental and not recommended"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Example run"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# https://ia804703.us.archive.org/27/items/uh-oh-here-we-go-again/Uh-Oh%2C%20Here%20we%20go%20again.mp4\n",
|
||||
"# https://ia601200.us.archive.org/9/items/f58703d4-61e6-4f8f-8c08-b42c7e16f7cb/f58703d4-61e6-4f8f-8c08-b42c7e16f7cb.mp4\n",
|
||||
"\n",
|
||||
"chain = VideoCaptioningChain(\n",
|
||||
" llm=ChatOpenAI(model=\"gpt-4\", max_tokens=4000, openai_api_key=OPENAI_API_KEY),\n",
|
||||
" assemblyai_key=ASSEMBLYAI_API_KEY,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"srt_content = chain.run(\n",
|
||||
" video_file_path=\"https://ia601200.us.archive.org/9/items/f58703d4-61e6-4f8f-8c08-b42c7e16f7cb/f58703d4-61e6-4f8f-8c08-b42c7e16f7cb.mp4\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(srt_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Writing output to .srt file"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"with open(\"output.srt\", \"w\") as file:\n",
|
||||
" file.write(srt_content)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "myenv",
|
||||
"language": "python",
|
||||
"name": "myenv"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.6"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
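For reference, a minimal sketch of how the optional parameters listed in the notebook above could be combined in a single call. It assumes `VideoCaptioningChain` accepts them as constructor keyword arguments, which the parameter list implies but the example run does not show.

```python
import getpass

from langchain.chains.video_captioning import VideoCaptioningChain
from langchain.chat_models.openai import ChatOpenAI

OPENAI_API_KEY = getpass.getpass("OpenAI API Key:")
ASSEMBLYAI_API_KEY = getpass.getpass("AssemblyAI API Key:")

# Assumed: the optional parameters documented above are plain constructor kwargs.
chain = VideoCaptioningChain(
    llm=ChatOpenAI(model="gpt-4", max_tokens=4000, openai_api_key=OPENAI_API_KEY),
    assemblyai_key=ASSEMBLYAI_API_KEY,
    verbose=True,                            # default: True
    use_logging=True,                        # default: True
    frame_skip=0,                            # 0 samples every frame (slower, more accurate)
    image_delta_threshold=3000000,           # default scene-change sensitivity
    closed_caption_char_limit=20,            # default character limit
    closed_caption_similarity_threshold=80,  # default clustering threshold
    use_unclustered_video_models=False,      # keep the experimental behaviour off
)

srt_content = chain.run(
    video_file_path="https://ia601200.us.archive.org/9/items/f58703d4-61e6-4f8f-8c08-b42c7e16f7cb/f58703d4-61e6-4f8f-8c08-b42c7e16f7cb.mp4"
)

# Write the generated subtitles to disk, as in the notebook's final cell.
with open("output.srt", "w") as file:
    file.write(srt_content)
```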
|
||||
@@ -601,7 +601,7 @@
|
||||
"source": [
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-4\", temperature=0)"
|
||||
"llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
@@ -4,14 +4,14 @@
|
||||
# ATTENTION: When adding a service below, use a non-standard port
# incremented by one from the preceding port.
|
||||
# For credentials always use `langchain` and `langchain` for the
|
||||
# username and password.
|
||||
# username and password.
|
||||
version: "3"
|
||||
name: langchain-tests
|
||||
|
||||
services:
|
||||
redis:
|
||||
image: redis/redis-stack-server:latest
|
||||
# We use non standard ports since
|
||||
# We use non standard ports since
|
||||
# these instances are used for testing
|
||||
# and users may already have existing
|
||||
# redis instances set up locally
|
||||
@@ -73,11 +73,6 @@ services:
|
||||
retries: 60
|
||||
volumes:
|
||||
- postgres_data_pgvector:/var/lib/postgresql/data
|
||||
vdms:
|
||||
image: intellabs/vdms:latest
|
||||
container_name: vdms_container
|
||||
ports:
|
||||
- "6025:55555"
|
||||
|
||||
volumes:
|
||||
postgres_data:
|
||||
|
||||
docs/.gitignore (vendored)
@@ -1,3 +1 @@
|
||||
/.quarto/
|
||||
src/supabase.d.ts
|
||||
build
|
||||
docs/.local_build.sh (new executable file)
@@ -0,0 +1,24 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
set -o xtrace
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "$0")"; pwd)"
|
||||
cd "${SCRIPT_DIR}"
|
||||
|
||||
mkdir -p ../_dist
|
||||
rsync -ruv --exclude node_modules --exclude api_reference --exclude .venv --exclude .docusaurus . ../_dist
|
||||
cd ../_dist
|
||||
poetry run python scripts/model_feat_table.py
|
||||
cp ../cookbook/README.md src/pages/cookbook.mdx
|
||||
mkdir -p docs/templates
|
||||
cp ../templates/docs/INDEX.md docs/templates/index.md
|
||||
poetry run python scripts/copy_templates.py
|
||||
wget -q https://raw.githubusercontent.com/langchain-ai/langserve/main/README.md -O docs/langserve.md
|
||||
wget -q https://raw.githubusercontent.com/langchain-ai/langgraph/main/README.md -O docs/langgraph.md
|
||||
|
||||
yarn
|
||||
|
||||
poetry run quarto preview docs
|
||||
@@ -1,82 +0,0 @@
|
||||
# we build the docs in these stages:
|
||||
# 1. install vercel and python dependencies
|
||||
# 2. copy files from "source dir" to "intermediate dir"
|
||||
# 2. generate files like model feat table, etc in "intermediate dir"
|
||||
# 3. copy files to their right spots (e.g. langserve readme) in "intermediate dir"
|
||||
# 4. build the docs from "intermediate dir" to "output dir"
|
||||
|
||||
SOURCE_DIR = docs/
|
||||
INTERMEDIATE_DIR = build/intermediate/docs
|
||||
|
||||
OUTPUT_NEW_DIR = build/output-new
|
||||
OUTPUT_NEW_DOCS_DIR = $(OUTPUT_NEW_DIR)/docs
|
||||
|
||||
PYTHON = .venv/bin/python
|
||||
|
||||
PARTNER_DEPS_LIST := $(shell find ../libs/partners -mindepth 1 -maxdepth 1 -type d -exec test -e "{}/pyproject.toml" \; -print | grep -vE "airbyte|ibm" | tr '\n' ' ')
|
||||
|
||||
PORT ?= 3001
|
||||
|
||||
clean:
|
||||
rm -rf build
|
||||
|
||||
install-vercel-deps:
|
||||
yum -y update
|
||||
yum install gcc bzip2-devel libffi-devel zlib-devel wget tar gzip rsync -y
|
||||
|
||||
install-py-deps:
|
||||
python3 -m venv .venv
|
||||
$(PYTHON) -m pip install --upgrade pip
|
||||
$(PYTHON) -m pip install --upgrade uv
|
||||
$(PYTHON) -m uv pip install -r vercel_requirements.txt
|
||||
$(PYTHON) -m uv pip install --editable $(PARTNER_DEPS_LIST)
|
||||
|
||||
generate-files:
|
||||
mkdir -p $(INTERMEDIATE_DIR)
|
||||
cp -r $(SOURCE_DIR)/* $(INTERMEDIATE_DIR)
|
||||
mkdir -p $(INTERMEDIATE_DIR)/templates
|
||||
|
||||
$(PYTHON) scripts/model_feat_table.py $(INTERMEDIATE_DIR)
|
||||
|
||||
$(PYTHON) scripts/document_loader_feat_table.py $(INTERMEDIATE_DIR)
|
||||
|
||||
$(PYTHON) scripts/copy_templates.py $(INTERMEDIATE_DIR)
|
||||
|
||||
wget -q https://raw.githubusercontent.com/langchain-ai/langserve/main/README.md -O $(INTERMEDIATE_DIR)/langserve.md
|
||||
$(PYTHON) scripts/resolve_local_links.py $(INTERMEDIATE_DIR)/langserve.md https://github.com/langchain-ai/langserve/tree/main/
|
||||
|
||||
copy-infra:
|
||||
mkdir -p $(OUTPUT_NEW_DIR)
|
||||
cp -r src $(OUTPUT_NEW_DIR)
|
||||
cp vercel.json $(OUTPUT_NEW_DIR)
|
||||
cp babel.config.js $(OUTPUT_NEW_DIR)
|
||||
cp -r data $(OUTPUT_NEW_DIR)
|
||||
cp docusaurus.config.js $(OUTPUT_NEW_DIR)
|
||||
cp package.json $(OUTPUT_NEW_DIR)
|
||||
cp sidebars.js $(OUTPUT_NEW_DIR)
|
||||
cp -r static $(OUTPUT_NEW_DIR)
|
||||
cp yarn.lock $(OUTPUT_NEW_DIR)
|
||||
|
||||
render:
|
||||
$(PYTHON) scripts/notebook_convert.py $(INTERMEDIATE_DIR) $(OUTPUT_NEW_DOCS_DIR)
|
||||
|
||||
md-sync:
|
||||
rsync -avm --include="*/" --include="*.mdx" --include="*.md" --include="*.png" --exclude="*" $(INTERMEDIATE_DIR)/ $(OUTPUT_NEW_DOCS_DIR)
|
||||
|
||||
generate-references:
|
||||
$(PYTHON) scripts/generate_api_reference_links.py --docs_dir $(OUTPUT_NEW_DOCS_DIR)
|
||||
|
||||
build: install-py-deps generate-files copy-infra render md-sync
|
||||
|
||||
vercel-build: install-vercel-deps build generate-references
|
||||
rm -rf docs
|
||||
mv $(OUTPUT_NEW_DOCS_DIR) docs
|
||||
rm -rf build
|
||||
yarn run docusaurus build
|
||||
mv build v0.2
|
||||
mkdir build
|
||||
mv v0.2 build
|
||||
mv build/v0.2/404.html build
|
||||
|
||||
start:
|
||||
cd $(OUTPUT_NEW_DIR) && yarn && yarn start --port=$(PORT)
|
||||
@@ -12,8 +12,7 @@ pre {
|
||||
}
|
||||
}
|
||||
|
||||
#my-component-root *,
|
||||
#headlessui-portal-root * {
|
||||
#my-component-root *, #headlessui-portal-root * {
|
||||
z-index: 10000;
|
||||
}
@@ -10,21 +10,12 @@ from pathlib import Path
|
||||
from typing import Dict, List, Literal, Optional, Sequence, TypedDict, Union
|
||||
|
||||
import toml
|
||||
import typing_extensions
|
||||
from langchain_core.runnables import Runnable, RunnableSerializable
|
||||
from pydantic import BaseModel
|
||||
|
||||
ROOT_DIR = Path(__file__).parents[2].absolute()
|
||||
HERE = Path(__file__).parent
|
||||
|
||||
ClassKind = Literal[
|
||||
"TypedDict",
|
||||
"Regular",
|
||||
"Pydantic",
|
||||
"enum",
|
||||
"RunnablePydantic",
|
||||
"RunnableNonPydantic",
|
||||
]
|
||||
ClassKind = Literal["TypedDict", "Regular", "Pydantic", "enum"]
|
||||
|
||||
|
||||
class ClassInfo(TypedDict):
|
||||
@@ -78,36 +69,8 @@ def _load_module_members(module_path: str, namespace: str) -> ModuleMembers:
|
||||
continue
|
||||
|
||||
if inspect.isclass(type_):
|
||||
# The classification of the class is used to select a template
# for the object when rendering the documentation.
# See `templates` directory for defined templates.
# This is a hacky solution to distinguish between different
# kinds of things that we want to render.
|
||||
if type(type_) is typing_extensions._TypedDictMeta: # type: ignore
|
||||
if type(type_) == typing._TypedDictMeta: # type: ignore
|
||||
kind: ClassKind = "TypedDict"
|
||||
elif type(type_) is typing._TypedDictMeta: # type: ignore
|
||||
kind: ClassKind = "TypedDict"
|
||||
elif (
|
||||
issubclass(type_, Runnable)
|
||||
and issubclass(type_, BaseModel)
|
||||
and type_ is not Runnable
|
||||
):
|
||||
# RunnableSerializable subclasses from Pydantic,
# for which we use autodoc_pydantic for rendering.
|
||||
# We need to distinguish these from regular Pydantic
|
||||
# classes so we can hide inherited Runnable methods
|
||||
# and provide a link to the Runnable interface from
|
||||
# the template.
|
||||
kind = "RunnablePydantic"
|
||||
elif (
|
||||
issubclass(type_, Runnable)
|
||||
and not issubclass(type_, BaseModel)
|
||||
and type_ is not Runnable
|
||||
):
|
||||
# These are not pydantic classes but are Runnable.
|
||||
# We'll hide all the inherited methods from Runnable
|
||||
# but use a regular class template to render.
|
||||
kind = "RunnableNonPydantic"
|
||||
elif issubclass(type_, Enum):
|
||||
kind = "enum"
|
||||
elif issubclass(type_, BaseModel):
|
||||
@@ -165,11 +128,11 @@ def _load_package_modules(
|
||||
of the modules/packages are part of the package vs. 3rd party or built-in.
|
||||
|
||||
Parameters:
|
||||
package_directory (Union[str, Path]): Path to the package directory.
|
||||
submodule (Optional[str]): Optional name of submodule to load.
|
||||
package_directory: Path to the package directory.
|
||||
submodule: Optional name of submodule to load.
|
||||
|
||||
Returns:
|
||||
Dict[str, ModuleMembers]: A dictionary where keys are module names and values are ModuleMembers objects.
|
||||
list: A list of loaded module objects.
|
||||
"""
|
||||
package_path = (
|
||||
Path(package_directory)
|
||||
@@ -224,7 +187,7 @@ def _load_package_modules(
|
||||
modules_by_namespace[top_namespace] = _module_members
|
||||
|
||||
except ImportError as e:
|
||||
print(f"Error: Unable to import module '{namespace}' with error: {e}")
|
||||
print(f"Error: Unable to import module '{namespace}' with error: {e}") # noqa: T201
|
||||
|
||||
return modules_by_namespace
|
||||
|
||||
@@ -288,10 +251,6 @@ Classes
|
||||
template = "enum.rst"
|
||||
elif class_["kind"] == "Pydantic":
|
||||
template = "pydantic.rst"
|
||||
elif class_["kind"] == "RunnablePydantic":
|
||||
template = "runnable_pydantic.rst"
|
||||
elif class_["kind"] == "RunnableNonPydantic":
|
||||
template = "runnable_non_pydantic.rst"
|
||||
else:
|
||||
template = "class.rst"
|
||||
|
||||
@@ -400,14 +359,9 @@ def main(dirs: Optional[list] = None) -> None:
|
||||
dirs = [
|
||||
dir_
|
||||
for dir_ in os.listdir(ROOT_DIR / "libs")
|
||||
if dir_ not in ("cli", "partners", "standard-tests")
|
||||
]
|
||||
dirs += [
|
||||
dir_
|
||||
for dir_ in os.listdir(ROOT_DIR / "libs" / "partners")
|
||||
if os.path.isdir(ROOT_DIR / "libs" / "partners" / dir_)
|
||||
and "pyproject.toml" in os.listdir(ROOT_DIR / "libs" / "partners" / dir_)
|
||||
if dir_ not in ("cli", "partners")
|
||||
]
|
||||
dirs += os.listdir(ROOT_DIR / "libs" / "partners")
|
||||
for dir_ in dirs:
|
||||
# Skip any hidden directories
|
||||
# Some of these could be present by mistake in the code base
File diff suppressed because one or more lines are too long
@@ -33,4 +33,4 @@
|
||||
{% endblock %}
|
||||
|
||||
|
||||
.. example_links:: {{ objname }}
|
||||
.. example_links:: {{ objname }}
|
||||
@@ -15,8 +15,6 @@
|
||||
:member-order: groupwise
|
||||
:show-inheritance: True
|
||||
:special-members: __call__
|
||||
:exclude-members: construct, copy, dict, from_orm, parse_file, parse_obj, parse_raw, schema, schema_json, update_forward_refs, validate, json, is_lc_serializable, to_json, to_json_not_implemented, lc_secrets, lc_attributes, lc_id, get_lc_namespace
|
||||
|
||||
|
||||
{% block attributes %}
|
||||
{% endblock %}
@@ -1,39 +0,0 @@
|
||||
:mod:`{{module}}`.{{objname}}
|
||||
{{ underline }}==============
|
||||
|
||||
.. NOTE:: {{objname}} implements the standard :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>`. 🏃
|
||||
|
||||
|
||||
.. currentmodule:: {{ module }}
|
||||
|
||||
.. autoclass:: {{ objname }}
|
||||
|
||||
{% block attributes %}
|
||||
{% if attributes %}
|
||||
.. rubric:: {{ _('Attributes') }}
|
||||
|
||||
.. autosummary::
|
||||
{% for item in attributes %}
|
||||
~{{ name }}.{{ item }}
|
||||
{%- endfor %}
|
||||
{% endif %}
|
||||
{% endblock %}
|
||||
|
||||
{% block methods %}
|
||||
{% if methods %}
|
||||
.. rubric:: {{ _('Methods') }}
|
||||
|
||||
.. autosummary::
|
||||
{% for item in methods %}
|
||||
~{{ name }}.{{ item }}
|
||||
{%- endfor %}
|
||||
|
||||
{% for item in methods %}
|
||||
.. automethod:: {{ name }}.{{ item }}
|
||||
{%- endfor %}
|
||||
|
||||
{% endif %}
|
||||
{% endblock %}
|
||||
|
||||
|
||||
.. example_links:: {{ objname }}
|
||||
@@ -1,22 +0,0 @@
|
||||
:mod:`{{module}}`.{{objname}}
|
||||
{{ underline }}==============
|
||||
|
||||
.. NOTE:: {{objname}} implements the standard :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>`. 🏃
|
||||
|
||||
.. currentmodule:: {{ module }}
|
||||
|
||||
.. autopydantic_model:: {{ objname }}
|
||||
:model-show-json: False
|
||||
:model-show-config-summary: False
|
||||
:model-show-validator-members: False
|
||||
:model-show-field-summary: False
|
||||
:field-signature-prefix: param
|
||||
:members:
|
||||
:undoc-members:
|
||||
:inherited-members:
|
||||
:member-order: groupwise
|
||||
:show-inheritance: True
|
||||
:special-members: __call__
|
||||
:exclude-members: construct, copy, dict, from_orm, parse_file, parse_obj, parse_raw, schema, schema_json, update_forward_refs, validate, json, is_lc_serializable, to_json, to_json_not_implemented, lc_secrets, lc_attributes, lc_id, get_lc_namespace, invoke, ainvoke, batch, abatch, batch_as_completed, abatch_as_completed, astream_log, stream, astream, astream_events, transform, atransform, get_output_schema, get_prompts, configurable_fields, configurable_alternatives, config_schema, map, pick, pipe, with_listeners, with_alisteners, with_config, with_fallbacks, with_types, with_retry, InputType, OutputType, config_specs, output_schema, get_input_schema, get_graph, get_name, input_schema, name, bind, assign
|
||||
|
||||
.. example_links:: {{ objname }}
|
||||
@@ -2,129 +2,132 @@
|
||||
{%- set url_root = pathto('', 1) %}
|
||||
{%- if url_root == '#' %}{% set url_root = '' %}{% endif %}
|
||||
{%- if not embedded and docstitle %}
|
||||
{%- set titlesuffix = " — "|safe + docstitle|e %}
|
||||
{%- set titlesuffix = " — "|safe + docstitle|e %}
|
||||
{%- else %}
|
||||
{%- set titlesuffix = "" %}
|
||||
{%- set titlesuffix = "" %}
|
||||
{%- endif %}
|
||||
{%- set lang_attr = 'en' %}
|
||||
|
||||
<!DOCTYPE html>
|
||||
<!--[if IE 8]><html class="no-js lt-ie9" lang="{{ lang_attr }}" > <![endif]-->
|
||||
<!--[if gt IE 8]><!-->
|
||||
<html class="no-js" lang="{{ lang_attr }}"> <!--<![endif]-->
|
||||
<!--[if gt IE 8]><!--> <html class="no-js" lang="{{ lang_attr }}" > <!--<![endif]-->
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
{{ metatags }}
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<meta charset="utf-8">
|
||||
{{ metatags }}
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
|
||||
{% block htmltitle %}
|
||||
<title>{{ title|striptags|e }}{{ titlesuffix }}</title>
|
||||
{% endblock %}
|
||||
<link rel="canonical"
|
||||
href="https://api.python.langchain.com/en/latest/{{ pagename }}.html"/>
|
||||
{% block htmltitle %}
|
||||
<title>{{ title|striptags|e }}{{ titlesuffix }}</title>
|
||||
{% endblock %}
|
||||
<link rel="canonical" href="https://api.python.langchain.com/en/latest/{{pagename}}.html" />
|
||||
|
||||
{% if favicon_url %}
|
||||
<link rel="shortcut icon" href="{{ favicon_url|e }}"/>
|
||||
{% endif %}
|
||||
{% if favicon_url %}
|
||||
<link rel="shortcut icon" href="{{ favicon_url|e }}"/>
|
||||
{% endif %}
|
||||
|
||||
<link rel="stylesheet"
|
||||
href="{{ pathto('_static/css/vendor/bootstrap.min.css', 1) }}"
|
||||
type="text/css"/>
|
||||
{%- for css in css_files %}
|
||||
{%- if css|attr("rel") %}
|
||||
<link rel="{{ css.rel }}" href="{{ pathto(css.filename, 1) }}"
|
||||
type="text/css"{% if css.title is not none %}
|
||||
title="{{ css.title }}"{% endif %} />
|
||||
{%- else %}
|
||||
<link rel="stylesheet" href="{{ pathto(css, 1) }}" type="text/css"/>
|
||||
{%- endif %}
|
||||
{%- endfor %}
|
||||
<link rel="stylesheet" href="{{ pathto('_static/' + style, 1) }}" type="text/css"/>
|
||||
<script id="documentation_options" data-url_root="{{ pathto('', 1) }}"
|
||||
src="{{ pathto('_static/documentation_options.js', 1) }}"></script>
|
||||
<script src="{{ pathto('_static/jquery.js', 1) }}"></script>
|
||||
{%- block extrahead %} {% endblock %}
|
||||
<link rel="stylesheet" href="{{ pathto('_static/css/vendor/bootstrap.min.css', 1) }}" type="text/css" />
|
||||
{%- for css in css_files %}
|
||||
{%- if css|attr("rel") %}
|
||||
<link rel="{{ css.rel }}" href="{{ pathto(css.filename, 1) }}" type="text/css"{% if css.title is not none %} title="{{ css.title }}"{% endif %} />
|
||||
{%- else %}
|
||||
<link rel="stylesheet" href="{{ pathto(css, 1) }}" type="text/css" />
|
||||
{%- endif %}
|
||||
{%- endfor %}
|
||||
<link rel="stylesheet" href="{{ pathto('_static/' + style, 1) }}" type="text/css" />
|
||||
<script id="documentation_options" data-url_root="{{ pathto('', 1) }}" src="{{ pathto('_static/documentation_options.js', 1) }}"></script>
|
||||
<script src="{{ pathto('_static/jquery.js', 1) }}"></script>
|
||||
{%- block extrahead %} {% endblock %}
|
||||
</head>
|
||||
<body>
|
||||
{% include "nav.html" %}
|
||||
{%- block content %}
|
||||
<div class="d-flex" id="sk-doc-wrapper">
|
||||
<input type="checkbox" name="sk-toggle-checkbox" id="sk-toggle-checkbox">
|
||||
<label id="sk-sidemenu-toggle" class="sk-btn-toggle-toc btn sk-btn-primary"
|
||||
for="sk-toggle-checkbox">Toggle Menu</label>
|
||||
<div id="sk-sidebar-wrapper" class="border-right">
|
||||
<div class="sk-sidebar-toc-wrapper">
|
||||
{%- if meta and meta['parenttoc']|tobool %}
|
||||
<div class="sk-sidebar-toc">
|
||||
{% set nav = get_nav_object(maxdepth=3, collapse=True, numbered=True) %}
|
||||
<ul>
|
||||
{% for main_nav_item in nav %}
|
||||
{% if main_nav_item.active %}
|
||||
<li>
|
||||
<a href="{{ main_nav_item.url }}"
|
||||
class="sk-toc-active">{{ main_nav_item.title }}</a>
|
||||
</li>
|
||||
<ul>
|
||||
{% for nav_item in main_nav_item.children %}
|
||||
<li>
|
||||
<a href="{{ nav_item.url }}"
|
||||
class="{% if nav_item.active %}sk-toc-active{% endif %}">{{ nav_item.title }}</a>
|
||||
{% if nav_item.children %}
|
||||
<ul>
|
||||
{% for inner_child in nav_item.children %}
|
||||
<li class="sk-toctree-l3">
|
||||
<a href="{{ inner_child.url }}">{{ inner_child.title }}</a>
|
||||
</li>
|
||||
{% endfor %}
|
||||
</ul>
|
||||
{% endif %}
|
||||
</li>
|
||||
{% endfor %}
|
||||
</ul>
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
</ul>
|
||||
</div>
|
||||
{%- elif meta and meta['globalsidebartoc']|tobool %}
|
||||
<div class="sk-sidebar-toc sk-sidebar-global-toc">
|
||||
{{ toctree(maxdepth=2, titles_only=True) }}
|
||||
</div>
|
||||
{%- else %}
|
||||
<div class="sk-sidebar-toc">
|
||||
{{ toc }}
|
||||
</div>
|
||||
{%- endif %}
|
||||
</div>
|
||||
<div class="d-flex" id="sk-doc-wrapper">
|
||||
<input type="checkbox" name="sk-toggle-checkbox" id="sk-toggle-checkbox">
|
||||
<label id="sk-sidemenu-toggle" class="sk-btn-toggle-toc btn sk-btn-primary" for="sk-toggle-checkbox">Toggle Menu</label>
|
||||
<div id="sk-sidebar-wrapper" class="border-right">
|
||||
<div class="sk-sidebar-toc-wrapper">
|
||||
<div class="btn-group w-100 mb-2" role="group" aria-label="rellinks">
|
||||
{%- if prev %}
|
||||
<a href="{{ prev.link|e }}" role="button" class="btn sk-btn-rellink py-1" sk-rellink-tooltip="{{ prev.title|striptags }}">Prev</a>
|
||||
{%- else %}
|
||||
<a href="#" role="button" class="btn sk-btn-rellink py-1 disabled"">Prev</a>
|
||||
{%- endif %}
|
||||
{%- if parents -%}
|
||||
<a href="{{ parents[-1].link|e }}" role="button" class="btn sk-btn-rellink py-1" sk-rellink-tooltip="{{ parents[-1].title|striptags }}">Up</a>
|
||||
{%- else %}
|
||||
<a href="#" role="button" class="btn sk-btn-rellink disabled py-1">Up</a>
|
||||
{%- endif %}
|
||||
{%- if next %}
|
||||
<a href="{{ next.link|e }}" role="button" class="btn sk-btn-rellink py-1" sk-rellink-tooltip="{{ next.title|striptags }}">Next</a>
|
||||
{%- else %}
|
||||
<a href="#" role="button" class="btn sk-btn-rellink py-1 disabled"">Next</a>
|
||||
{%- endif %}
|
||||
</div>
|
||||
<div id="sk-page-content-wrapper">
|
||||
<div class="sk-page-content container-fluid body px-md-3" role="main">
|
||||
{% block body %}{% endblock %}
|
||||
{%- if meta and meta['parenttoc']|tobool %}
|
||||
<div class="sk-sidebar-toc">
|
||||
{% set nav = get_nav_object(maxdepth=3, collapse=True, numbered=True) %}
|
||||
<ul>
|
||||
{% for main_nav_item in nav %}
|
||||
{% if main_nav_item.active %}
|
||||
<li>
|
||||
<a href="{{ main_nav_item.url }}" class="sk-toc-active">{{ main_nav_item.title }}</a>
|
||||
</li>
|
||||
<ul>
|
||||
{% for nav_item in main_nav_item.children %}
|
||||
<li>
|
||||
<a href="{{ nav_item.url }}" class="{% if nav_item.active %}sk-toc-active{% endif %}">{{ nav_item.title }}</a>
|
||||
{% if nav_item.children %}
|
||||
<ul>
|
||||
{% for inner_child in nav_item.children %}
|
||||
<li class="sk-toctree-l3">
|
||||
<a href="{{ inner_child.url }}">{{ inner_child.title }}</a>
|
||||
</li>
|
||||
{% endfor %}
|
||||
</ul>
|
||||
{% endif %}
|
||||
</li>
|
||||
{% endfor %}
|
||||
</ul>
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
</ul>
|
||||
</div>
|
||||
<div class="container">
|
||||
<footer class="sk-content-footer">
|
||||
{%- if pagename != 'index' %}
|
||||
{%- if show_copyright %}
|
||||
{%- if hasdoc('copyright') %}
|
||||
{% trans path=pathto('copyright'), copyright=copyright|e %}
|
||||
© {{ copyright }}.{% endtrans %}
|
||||
{%- else %}
|
||||
{% trans copyright=copyright|e %}© {{ copyright }}
|
||||
.{% endtrans %}
|
||||
{%- endif %}
|
||||
{%- endif %}
|
||||
{%- if last_updated %}
|
||||
{% trans last_updated=last_updated|e %}Last updated
|
||||
on {{ last_updated }}.{% endtrans %}
|
||||
{%- endif %}
|
||||
{%- if show_source and has_source and sourcename %}
|
||||
<a href="{{ pathto('_sources/' + sourcename, true)|e }}"
|
||||
rel="nofollow">{{ _('Show this page source') }}</a>
|
||||
{%- endif %}
|
||||
{%- endif %}
|
||||
</footer>
|
||||
{%- elif meta and meta['globalsidebartoc']|tobool %}
|
||||
<div class="sk-sidebar-toc sk-sidebar-global-toc">
|
||||
{{ toctree(maxdepth=2, titles_only=True) }}
|
||||
</div>
|
||||
</div>
|
||||
{%- else %}
|
||||
<div class="sk-sidebar-toc">
|
||||
{{ toc }}
|
||||
</div>
|
||||
{%- endif %}
|
||||
</div>
|
||||
</div>
|
||||
<div id="sk-page-content-wrapper">
|
||||
<div class="sk-page-content container-fluid body px-md-3" role="main">
|
||||
{% block body %}{% endblock %}
|
||||
</div>
|
||||
<div class="container">
|
||||
<footer class="sk-content-footer">
|
||||
{%- if pagename != 'index' %}
|
||||
{%- if show_copyright %}
|
||||
{%- if hasdoc('copyright') %}
|
||||
{% trans path=pathto('copyright'), copyright=copyright|e %}© {{ copyright }}.{% endtrans %}
|
||||
{%- else %}
|
||||
{% trans copyright=copyright|e %}© {{ copyright }}.{% endtrans %}
|
||||
{%- endif %}
|
||||
{%- endif %}
|
||||
{%- if last_updated %}
|
||||
{% trans last_updated=last_updated|e %}Last updated on {{ last_updated }}.{% endtrans %}
|
||||
{%- endif %}
|
||||
{%- if show_source and has_source and sourcename %}
|
||||
<a href="{{ pathto('_sources/' + sourcename, true)|e }}" rel="nofollow">{{ _('Show this page source') }}</a>
|
||||
{%- endif %}
|
||||
{%- endif %}
|
||||
</footer>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{%- endblock %}
|
||||
<script src="{{ pathto('_static/js/vendor/bootstrap.min.js', 1) }}"></script>
|
||||
{% include "javascript.html" %}
@@ -1398,20 +1398,3 @@ table.sk-sponsor-table td {
|
||||
.highlight .vi { color: #bb60d5 } /* Name.Variable.Instance */
|
||||
.highlight .vm { color: #bb60d5 } /* Name.Variable.Magic */
|
||||
.highlight .il { color: #208050 } /* Literal.Number.Integer.Long */
|
||||
|
||||
/** Custom styles overriding certain values */
|
||||
|
||||
div.sk-sidebar-toc-wrapper {
|
||||
width: unset;
|
||||
overflow-x: auto;
|
||||
}
|
||||
|
||||
div.sk-sidebar-toc-wrapper > [aria-label="rellinks"] {
|
||||
position: sticky;
|
||||
left: 0;
|
||||
}
|
||||
|
||||
.navbar-nav .dropdown-menu {
|
||||
max-height: 80vh;
|
||||
overflow-y: auto;
|
||||
}
|
||||
|
||||
docs/code-block-loader.js (new file)
@@ -0,0 +1,76 @@
|
||||
/* eslint-disable prefer-template */
|
||||
/* eslint-disable no-param-reassign */
|
||||
// eslint-disable-next-line import/no-extraneous-dependencies
|
||||
const babel = require("@babel/core");
|
||||
const path = require("path");
|
||||
const fs = require("fs");
|
||||
|
||||
/**
|
||||
*
|
||||
* @param {string|Buffer} content Content of the resource file
|
||||
* @param {object} [map] SourceMap data consumable by https://github.com/mozilla/source-map
|
||||
* @param {any} [meta] Meta data, could be anything
|
||||
*/
|
||||
async function webpackLoader(content, map, meta) {
|
||||
const cb = this.async();
|
||||
|
||||
if (!this.resourcePath.endsWith(".ts")) {
|
||||
cb(null, JSON.stringify({ content, imports: [] }), map, meta);
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
const result = await babel.parseAsync(content, {
|
||||
sourceType: "module",
|
||||
filename: this.resourcePath,
|
||||
});
|
||||
|
||||
const imports = [];
|
||||
|
||||
result.program.body.forEach((node) => {
|
||||
if (node.type === "ImportDeclaration") {
|
||||
const source = node.source.value;
|
||||
|
||||
if (!source.startsWith("langchain")) {
|
||||
return;
|
||||
}
|
||||
|
||||
node.specifiers.forEach((specifier) => {
|
||||
if (specifier.type === "ImportSpecifier") {
|
||||
const local = specifier.local.name;
|
||||
const imported = specifier.imported.name;
|
||||
imports.push({ local, imported, source });
|
||||
} else {
|
||||
throw new Error("Unsupported import type");
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
imports.forEach((imp) => {
|
||||
const { imported, source } = imp;
|
||||
const moduleName = source.split("/").slice(1).join("_");
|
||||
const docsPath = path.resolve(__dirname, "docs", "api", moduleName);
|
||||
const available = fs.readdirSync(docsPath, { withFileTypes: true });
|
||||
const found = available.find(
|
||||
(dirent) =>
|
||||
dirent.isDirectory() &&
|
||||
fs.existsSync(path.resolve(docsPath, dirent.name, imported + ".md"))
|
||||
);
|
||||
if (found) {
|
||||
imp.docs =
|
||||
"/" + path.join("docs", "api", moduleName, found.name, imported);
|
||||
} else {
|
||||
throw new Error(
|
||||
`Could not find docs for ${source}.${imported} in docs/api/`
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
cb(null, JSON.stringify({ content, imports }), map, meta);
|
||||
} catch (err) {
|
||||
cb(err);
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = webpackLoader;
|
||||
docs/data/people.yml: file diff suppressed because it is too large
@@ -1,912 +0,0 @@
|
||||
# arXiv
|
||||
|
||||
LangChain implements the latest research in the field of Natural Language Processing.
|
||||
This page contains `arXiv` papers referenced in the LangChain Documentation, API Reference,
|
||||
Templates, and Cookbooks.
|
||||
|
||||
In the other direction, researchers use LangChain in their own work and cite it in their papers.
You can find [such papers](https://arxiv.org/search/?query=langchain&searchtype=all&source=header) on arXiv.
|
||||
|
||||
## Summary
|
||||
|
||||
| arXiv id / Title | Authors | Published date 🔻 | LangChain Documentation|
|
||||
|------------------|---------|-------------------|------------------------|
|
||||
| `2402.03620v1` [Self-Discover: Large Language Models Self-Compose Reasoning Structures](http://arxiv.org/abs/2402.03620v1) | Pei Zhou, Jay Pujara, Xiang Ren, et al. | 2024-02-06 | `Cookbook:` [self-discover](https://github.com/langchain-ai/langchain/blob/master/cookbook/self-discover.ipynb)
|
||||
| `2401.18059v1` [RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval](http://arxiv.org/abs/2401.18059v1) | Parth Sarthi, Salman Abdullah, Aditi Tuli, et al. | 2024-01-31 | `Cookbook:` [RAPTOR](https://github.com/langchain-ai/langchain/blob/master/cookbook/RAPTOR.ipynb)
|
||||
| `2401.15884v2` [Corrective Retrieval Augmented Generation](http://arxiv.org/abs/2401.15884v2) | Shi-Qi Yan, Jia-Chen Gu, Yun Zhu, et al. | 2024-01-29 | `Cookbook:` [langgraph_crag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_crag.ipynb)
|
||||
| `2401.04088v1` [Mixtral of Experts](http://arxiv.org/abs/2401.04088v1) | Albert Q. Jiang, Alexandre Sablayrolles, Antoine Roux, et al. | 2024-01-08 | `Cookbook:` [together_ai](https://github.com/langchain-ai/langchain/blob/master/cookbook/together_ai.ipynb)
|
||||
| `2312.06648v2` [Dense X Retrieval: What Retrieval Granularity Should We Use?](http://arxiv.org/abs/2312.06648v2) | Tong Chen, Hongwei Wang, Sihao Chen, et al. | 2023-12-11 | `Template:` [propositional-retrieval](https://python.langchain.com/docs/templates/propositional-retrieval)
|
||||
| `2311.09210v1` [Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models](http://arxiv.org/abs/2311.09210v1) | Wenhao Yu, Hongming Zhang, Xiaoman Pan, et al. | 2023-11-15 | `Template:` [chain-of-note-wiki](https://python.langchain.com/docs/templates/chain-of-note-wiki)
|
||||
| `2310.11511v1` [Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection](http://arxiv.org/abs/2310.11511v1) | Akari Asai, Zeqiu Wu, Yizhong Wang, et al. | 2023-10-17 | `Cookbook:` [langgraph_self_rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_self_rag.ipynb)
|
||||
| `2310.06117v2` [Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models](http://arxiv.org/abs/2310.06117v2) | Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen, et al. | 2023-10-09 | `Template:` [stepback-qa-prompting](https://python.langchain.com/docs/templates/stepback-qa-prompting), `Cookbook:` [stepback-qa](https://github.com/langchain-ai/langchain/blob/master/cookbook/stepback-qa.ipynb)
|
||||
| `2307.09288v2` [Llama 2: Open Foundation and Fine-Tuned Chat Models](http://arxiv.org/abs/2307.09288v2) | Hugo Touvron, Louis Martin, Kevin Stone, et al. | 2023-07-18 | `Cookbook:` [Semi_Structured_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_Structured_RAG.ipynb)
|
||||
| `2305.14283v3` [Query Rewriting for Retrieval-Augmented Large Language Models](http://arxiv.org/abs/2305.14283v3) | Xinbei Ma, Yeyun Gong, Pengcheng He, et al. | 2023-05-23 | `Template:` [rewrite-retrieve-read](https://python.langchain.com/docs/templates/rewrite-retrieve-read), `Cookbook:` [rewrite](https://github.com/langchain-ai/langchain/blob/master/cookbook/rewrite.ipynb)
|
||||
| `2305.08291v1` [Large Language Model Guided Tree-of-Thought](http://arxiv.org/abs/2305.08291v1) | Jieyi Long | 2023-05-15 | `API:` [langchain_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot), `Cookbook:` [tree_of_thought](https://github.com/langchain-ai/langchain/blob/master/cookbook/tree_of_thought.ipynb)
|
||||
| `2305.04091v3` [Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models](http://arxiv.org/abs/2305.04091v3) | Lei Wang, Wanyu Xu, Yihuai Lan, et al. | 2023-05-06 | `Cookbook:` [plan_and_execute_agent](https://github.com/langchain-ai/langchain/blob/master/cookbook/plan_and_execute_agent.ipynb)
|
||||
| `2304.08485v2` [Visual Instruction Tuning](http://arxiv.org/abs/2304.08485v2) | Haotian Liu, Chunyuan Li, Qingyang Wu, et al. | 2023-04-17 | `Cookbook:` [Semi_structured_and_multi_modal_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb), [Semi_structured_multi_modal_RAG_LLaMA2](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb)
|
||||
| `2304.03442v2` [Generative Agents: Interactive Simulacra of Human Behavior](http://arxiv.org/abs/2304.03442v2) | Joon Sung Park, Joseph C. O'Brien, Carrie J. Cai, et al. | 2023-04-07 | `Cookbook:` [multiagent_bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb), [generative_agents_interactive_simulacra_of_human_behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb)
|
||||
| `2303.17760v2` [CAMEL: Communicative Agents for "Mind" Exploration of Large Language Model Society](http://arxiv.org/abs/2303.17760v2) | Guohao Li, Hasan Abed Al Kader Hammoud, Hani Itani, et al. | 2023-03-31 | `Cookbook:` [camel_role_playing](https://github.com/langchain-ai/langchain/blob/master/cookbook/camel_role_playing.ipynb)
|
||||
| `2303.17580v4` [HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face](http://arxiv.org/abs/2303.17580v4) | Yongliang Shen, Kaitao Song, Xu Tan, et al. | 2023-03-30 | `API:` [langchain_experimental.autonomous_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents), `Cookbook:` [hugginggpt](https://github.com/langchain-ai/langchain/blob/master/cookbook/hugginggpt.ipynb)
|
||||
| `2303.08774v6` [GPT-4 Technical Report](http://arxiv.org/abs/2303.08774v6) | OpenAI, Josh Achiam, Steven Adler, et al. | 2023-03-15 | `Docs:` [docs/integrations/vectorstores/mongodb_atlas](https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas)
|
||||
| `2301.10226v4` [A Watermark for Large Language Models](http://arxiv.org/abs/2301.10226v4) | John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al. | 2023-01-24 | `API:` [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
|
||||
| `2212.10496v1` [Precise Zero-Shot Dense Retrieval without Relevance Labels](http://arxiv.org/abs/2212.10496v1) | Luyu Gao, Xueguang Ma, Jimmy Lin, et al. | 2022-12-20 | `API:` [langchain...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder), `Template:` [hyde](https://python.langchain.com/docs/templates/hyde), `Cookbook:` [hypothetical_document_embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb)
| `2212.07425v3` [Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments](http://arxiv.org/abs/2212.07425v3) | Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al. | 2022-12-12 | `API:` [langchain_experimental.fallacy_removal](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.fallacy_removal)
| `2211.13892v2` [Complementary Explanations for Effective In-Context Learning](http://arxiv.org/abs/2211.13892v2) | Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al. | 2022-11-25 | `API:` [langchain_core...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)
| `2211.10435v2` [PAL: Program-aided Language Models](http://arxiv.org/abs/2211.10435v2) | Luyu Gao, Aman Madaan, Shuyan Zhou, et al. | 2022-11-18 | `API:` [langchain_experimental...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), [langchain_experimental.pal_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain), `Cookbook:` [program_aided_language_model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb)
| `2210.03629v3` [ReAct: Synergizing Reasoning and Acting in Language Models](http://arxiv.org/abs/2210.03629v3) | Shunyu Yao, Jeffrey Zhao, Dian Yu, et al. | 2022-10-06 | `Docs:` [docs/integrations/providers/cohere](https://python.langchain.com/docs/integrations/providers/cohere), [docs/integrations/chat/huggingface](https://python.langchain.com/docs/integrations/chat/huggingface), [docs/integrations/tools/ionic_shopping](https://python.langchain.com/docs/integrations/tools/ionic_shopping), `API:` [langchain...create_react_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent), [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain)
| `2209.10785v2` [Deep Lake: a Lakehouse for Deep Learning](http://arxiv.org/abs/2209.10785v2) | Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al. | 2022-09-22 | `Docs:` [docs/integrations/providers/activeloop_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake)
| `2205.12654v1` [Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages](http://arxiv.org/abs/2205.12654v1) | Kevin Heffernan, Onur Çelebi, Holger Schwenk | 2022-05-25 | `API:` [langchain_community...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)
| `2204.00498v1` [Evaluating the Text-to-SQL Capabilities of Large Language Models](http://arxiv.org/abs/2204.00498v1) | Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau | 2022-03-15 | `API:` [langchain_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL), [langchain_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase)
| `2202.00666v5` [Locally Typical Sampling](http://arxiv.org/abs/2202.00666v5) | Clara Meister, Tiago Pimentel, Gian Wiher, et al. | 2022-02-01 | `API:` [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
| `2103.00020v1` [Learning Transferable Visual Models From Natural Language Supervision](http://arxiv.org/abs/2103.00020v1) | Alec Radford, Jong Wook Kim, Chris Hallacy, et al. | 2021-02-26 | `API:` [langchain_experimental.open_clip](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.open_clip)
| `1909.05858v2` [CTRL: A Conditional Transformer Language Model for Controllable Generation](http://arxiv.org/abs/1909.05858v2) | Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al. | 2019-09-11 | `API:` [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
| `1908.10084v1` [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](http://arxiv.org/abs/1908.10084v1) | Nils Reimers, Iryna Gurevych | 2019-08-27 | `Docs:` [docs/integrations/text_embedding/sentence_transformers](https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers)
## Self-Discover: Large Language Models Self-Compose Reasoning Structures

- **arXiv id:** 2402.03620v1
- **Title:** Self-Discover: Large Language Models Self-Compose Reasoning Structures
- **Authors:** Pei Zhou, Jay Pujara, Xiang Ren, et al.
- **Published Date:** 2024-02-06
- **URL:** http://arxiv.org/abs/2402.03620v1
- **LangChain:**

   - **Cookbook:** [self-discover](https://github.com/langchain-ai/langchain/blob/master/cookbook/self-discover.ipynb)

**Abstract:** We introduce SELF-DISCOVER, a general framework for LLMs to self-discover the task-intrinsic reasoning structures to tackle complex reasoning problems that are challenging for typical prompting methods. Core to the framework is a self-discovery process where LLMs select multiple atomic reasoning modules such as critical thinking and step-by-step thinking, and compose them into an explicit reasoning structure for LLMs to follow during decoding. SELF-DISCOVER substantially improves GPT-4 and PaLM 2's performance on challenging reasoning benchmarks such as BigBench-Hard, grounded agent reasoning, and MATH, by as much as 32% compared to Chain of Thought (CoT). Furthermore, SELF-DISCOVER outperforms inference-intensive methods such as CoT-Self-Consistency by more than 20%, while requiring 10-40x less inference compute. Finally, we show that the self-discovered reasoning structures are universally applicable across model families: from PaLM 2-L to GPT-4, and from GPT-4 to Llama2, and share commonalities with human reasoning patterns.

## RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval

- **arXiv id:** 2401.18059v1
- **Title:** RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval
- **Authors:** Parth Sarthi, Salman Abdullah, Aditi Tuli, et al.
- **Published Date:** 2024-01-31
- **URL:** http://arxiv.org/abs/2401.18059v1
- **LangChain:**

   - **Cookbook:** [RAPTOR](https://github.com/langchain-ai/langchain/blob/master/cookbook/RAPTOR.ipynb)

**Abstract:** Retrieval-augmented language models can better adapt to changes in world state and incorporate long-tail knowledge. However, most existing methods retrieve only short contiguous chunks from a retrieval corpus, limiting holistic understanding of the overall document context. We introduce the novel approach of recursively embedding, clustering, and summarizing chunks of text, constructing a tree with differing levels of summarization from the bottom up. At inference time, our RAPTOR model retrieves from this tree, integrating information across lengthy documents at different levels of abstraction. Controlled experiments show that retrieval with recursive summaries offers significant improvements over traditional retrieval-augmented LMs on several tasks. On question-answering tasks that involve complex, multi-step reasoning, we show state-of-the-art results; for example, by coupling RAPTOR retrieval with the use of GPT-4, we can improve the best performance on the QuALITY benchmark by 20% in absolute accuracy.

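
The bottom-up tree construction described above can be pictured with a short sketch. This is not the RAPTOR cookbook's code; `embed`, `cluster`, and `summarize` are hypothetical stand-ins for an embedding model, a clustering routine (e.g. a GMM over embeddings), and an LLM summarization call:

```python
# Minimal RAPTOR-style tree build: recursively embed, cluster, and summarize
# the current level until no further compression is possible. Retrieval can
# then search leaves and summaries alike.
from typing import Callable, List


def build_raptor_tree(
    chunks: List[str],
    embed: Callable[[List[str]], List[List[float]]],
    cluster: Callable[[List[List[float]]], List[List[int]]],
    summarize: Callable[[List[str]], str],
) -> List[List[str]]:
    levels = [chunks]                          # level 0: raw leaf chunks
    while len(levels[-1]) > 1:
        nodes = levels[-1]
        vectors = embed(nodes)                 # embed the current level
        groups = cluster(vectors)              # lists of node indices per cluster
        summaries = [summarize([nodes[i] for i in g]) for g in groups]
        if len(summaries) >= len(nodes):       # no compression -> stop
            break
        levels.append(summaries)               # next level: cluster summaries
    return levels
```
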
## Corrective Retrieval Augmented Generation

- **arXiv id:** 2401.15884v2
- **Title:** Corrective Retrieval Augmented Generation
- **Authors:** Shi-Qi Yan, Jia-Chen Gu, Yun Zhu, et al.
- **Published Date:** 2024-01-29
- **URL:** http://arxiv.org/abs/2401.15884v2
- **LangChain:**

   - **Cookbook:** [langgraph_crag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_crag.ipynb)

**Abstract:** Large language models (LLMs) inevitably exhibit hallucinations since the accuracy of generated texts cannot be secured solely by the parametric knowledge they encapsulate. Although retrieval-augmented generation (RAG) is a practicable complement to LLMs, it relies heavily on the relevance of retrieved documents, raising concerns about how the model behaves if retrieval goes wrong. To this end, we propose the Corrective Retrieval Augmented Generation (CRAG) to improve the robustness of generation. Specifically, a lightweight retrieval evaluator is designed to assess the overall quality of retrieved documents for a query, returning a confidence degree based on which different knowledge retrieval actions can be triggered. Since retrieval from static and limited corpora can only return sub-optimal documents, large-scale web searches are utilized as an extension for augmenting the retrieval results. Besides, a decompose-then-recompose algorithm is designed for retrieved documents to selectively focus on key information and filter out irrelevant information in them. CRAG is plug-and-play and can be seamlessly coupled with various RAG-based approaches. Experiments on four datasets covering short- and long-form generation tasks show that CRAG can significantly improve the performance of RAG-based approaches.

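
A rough sketch of the control flow the abstract describes: score the retrieved documents, then keep them, fall back to web search, or combine both. The `evaluate`, `refine`, and `web_search` helpers and the thresholds are illustrative assumptions, not the paper's or the cookbook's actual values:

```python
# CRAG-style decision logic: a retrieval evaluator's confidence picks one of
# three actions ("correct", "incorrect", "ambiguous") before generation.
def corrective_rag(question, retrieved_docs, evaluate, refine, web_search,
                   upper=0.7, lower=0.3):
    score = max(evaluate(question, d) for d in retrieved_docs)
    if score >= upper:        # "correct": trust the retrieved documents
        knowledge = refine(question, retrieved_docs)
    elif score <= lower:      # "incorrect": discard them, search the web instead
        knowledge = refine(question, web_search(question))
    else:                     # "ambiguous": combine both knowledge sources
        knowledge = refine(question, retrieved_docs + web_search(question))
    return knowledge          # passed to the generator as context
```
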
## Mixtral of Experts

- **arXiv id:** 2401.04088v1
- **Title:** Mixtral of Experts
- **Authors:** Albert Q. Jiang, Alexandre Sablayrolles, Antoine Roux, et al.
- **Published Date:** 2024-01-08
- **URL:** http://arxiv.org/abs/2401.04088v1
- **LangChain:**

   - **Cookbook:** [together_ai](https://github.com/langchain-ai/langchain/blob/master/cookbook/together_ai.ipynb)

**Abstract:** We introduce Mixtral 8x7B, a Sparse Mixture of Experts (SMoE) language model. Mixtral has the same architecture as Mistral 7B, with the difference that each layer is composed of 8 feedforward blocks (i.e. experts). For every token, at each layer, a router network selects two experts to process the current state and combine their outputs. Even though each token only sees two experts, the selected experts can be different at each timestep. As a result, each token has access to 47B parameters, but only uses 13B active parameters during inference. Mixtral was trained with a context size of 32k tokens and it outperforms or matches Llama 2 70B and GPT-3.5 across all evaluated benchmarks. In particular, Mixtral vastly outperforms Llama 2 70B on mathematics, code generation, and multilingual benchmarks. We also provide a model fine-tuned to follow instructions, Mixtral 8x7B - Instruct, that surpasses GPT-3.5 Turbo, Claude-2.1, Gemini Pro, and Llama 2 70B - chat model on human benchmarks. Both the base and instruct models are released under the Apache 2.0 license.

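
The per-token routing step can be illustrated with a toy, dependency-free sketch of top-2 expert selection; it is not Mixtral's implementation, and `experts` is assumed to be a list of callables that each return a vector of the same width as the input:

```python
# Toy sparse-MoE routing for a single token: score experts with the router,
# keep the top-2, and mix their outputs with renormalized softmax weights.
import math


def route_token(hidden, router_weights, experts, k=2):
    # router score per expert: dot product of its weight row with the hidden state
    logits = [sum(w * h for w, h in zip(row, hidden)) for row in router_weights]
    top = sorted(range(len(logits)), key=lambda i: logits[i], reverse=True)[:k]
    weights = [math.exp(logits[i]) for i in top]
    gates = [w / sum(weights) for w in weights]        # softmax over the top-k only
    outputs = [experts[i](hidden) for i in top]        # only k experts actually run
    return [sum(g * out[d] for g, out in zip(gates, outputs))
            for d in range(len(hidden))]
```
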
## Dense X Retrieval: What Retrieval Granularity Should We Use?

- **arXiv id:** 2312.06648v2
- **Title:** Dense X Retrieval: What Retrieval Granularity Should We Use?
- **Authors:** Tong Chen, Hongwei Wang, Sihao Chen, et al.
- **Published Date:** 2023-12-11
- **URL:** http://arxiv.org/abs/2312.06648v2
- **LangChain:**

   - **Template:** [propositional-retrieval](https://python.langchain.com/docs/templates/propositional-retrieval)

**Abstract:** Dense retrieval has become a prominent method to obtain relevant context or world knowledge in open-domain NLP tasks. When we use a learned dense retriever on a retrieval corpus at inference time, an often-overlooked design choice is the retrieval unit in which the corpus is indexed, e.g. document, passage, or sentence. We discover that the retrieval unit choice significantly impacts the performance of both retrieval and downstream tasks. Distinct from the typical approach of using passages or sentences, we introduce a novel retrieval unit, proposition, for dense retrieval. Propositions are defined as atomic expressions within text, each encapsulating a distinct factoid and presented in a concise, self-contained natural language format. We conduct an empirical comparison of different retrieval granularity. Our results reveal that proposition-based retrieval significantly outperforms traditional passage or sentence-based methods in dense retrieval. Moreover, retrieval by proposition also enhances the performance of downstream QA tasks, since the retrieved texts are more condensed with question-relevant information, reducing the need for lengthy input tokens and minimizing the inclusion of extraneous, irrelevant information.

## Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models

- **arXiv id:** 2311.09210v1
- **Title:** Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models
- **Authors:** Wenhao Yu, Hongming Zhang, Xiaoman Pan, et al.
- **Published Date:** 2023-11-15
- **URL:** http://arxiv.org/abs/2311.09210v1
- **LangChain:**

   - **Template:** [chain-of-note-wiki](https://python.langchain.com/docs/templates/chain-of-note-wiki)

**Abstract:** Retrieval-augmented language models (RALMs) represent a substantial advancement in the capabilities of large language models, notably in reducing factual hallucination by leveraging external knowledge sources. However, the reliability of the retrieved information is not always guaranteed. The retrieval of irrelevant data can lead to misguided responses, potentially causing the model to overlook its inherent knowledge, even when it possesses adequate information to address the query. Moreover, standard RALMs often struggle to assess whether they possess adequate knowledge, both intrinsic and retrieved, to provide an accurate answer. In situations where knowledge is lacking, these systems should ideally respond with "unknown" when the answer is unattainable. In response to these challenges, we introduce Chain-of-Noting (CoN), a novel approach aimed at improving the robustness of RALMs in facing noisy, irrelevant documents and in handling unknown scenarios. The core idea of CoN is to generate sequential reading notes for retrieved documents, enabling a thorough evaluation of their relevance to the given question and integrating this information to formulate the final answer. We employed ChatGPT to create training data for CoN, which was subsequently used to train a LLaMa-2 7B model. Our experiments across four open-domain QA benchmarks show that RALMs equipped with CoN significantly outperform standard RALMs. Notably, CoN achieves an average improvement of +7.9 in EM score given entirely noisy retrieved documents and +10.5 in rejection rates for real-time questions that fall outside the pre-training knowledge scope.

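
A minimal sketch of the note-taking idea, assuming a hypothetical `llm(prompt) -> str` helper; the linked `chain-of-note-wiki` template is the maintained implementation:

```python
# Chain-of-Note style answering: draft a short reading note per retrieved
# document, then answer from the notes, allowing an explicit "unknown".
def chain_of_note(question, documents, llm):
    notes = []
    for i, doc in enumerate(documents, start=1):
        notes.append(llm(
            f"Question: {question}\nDocument {i}: {doc}\n"
            "Write a short note on whether and how this document helps answer "
            "the question."
        ))
    notes_block = "\n".join(f"- {n}" for n in notes)
    return llm(
        f"Question: {question}\nReading notes:\n{notes_block}\n"
        "Using only the relevant notes, answer the question; reply 'unknown' "
        "if the notes are insufficient."
    )
```
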
## Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection

- **arXiv id:** 2310.11511v1
- **Title:** Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection
- **Authors:** Akari Asai, Zeqiu Wu, Yizhong Wang, et al.
- **Published Date:** 2023-10-17
- **URL:** http://arxiv.org/abs/2310.11511v1
- **LangChain:**

   - **Cookbook:** [langgraph_self_rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_self_rag.ipynb)

**Abstract:** Despite their remarkable capabilities, large language models (LLMs) often produce responses containing factual inaccuracies due to their sole reliance on the parametric knowledge they encapsulate. Retrieval-Augmented Generation (RAG), an ad hoc approach that augments LMs with retrieval of relevant knowledge, decreases such issues. However, indiscriminately retrieving and incorporating a fixed number of retrieved passages, regardless of whether retrieval is necessary, or passages are relevant, diminishes LM versatility or can lead to unhelpful response generation. We introduce a new framework called Self-Reflective Retrieval-Augmented Generation (Self-RAG) that enhances an LM's quality and factuality through retrieval and self-reflection. Our framework trains a single arbitrary LM that adaptively retrieves passages on-demand, and generates and reflects on retrieved passages and its own generations using special tokens, called reflection tokens. Generating reflection tokens makes the LM controllable during the inference phase, enabling it to tailor its behavior to diverse task requirements. Experiments show that Self-RAG (7B and 13B parameters) significantly outperforms state-of-the-art LLMs and retrieval-augmented models on a diverse set of tasks. Specifically, Self-RAG outperforms ChatGPT and retrieval-augmented Llama2-chat on Open-domain QA, reasoning and fact verification tasks, and it shows significant gains in improving factuality and citation accuracy for long-form generations relative to these models.

## Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models

- **arXiv id:** 2310.06117v2
- **Title:** Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models
- **Authors:** Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen, et al.
- **Published Date:** 2023-10-09
- **URL:** http://arxiv.org/abs/2310.06117v2
- **LangChain:**

   - **Template:** [stepback-qa-prompting](https://python.langchain.com/docs/templates/stepback-qa-prompting)
   - **Cookbook:** [stepback-qa](https://github.com/langchain-ai/langchain/blob/master/cookbook/stepback-qa.ipynb)

**Abstract:** We present Step-Back Prompting, a simple prompting technique that enables LLMs to do abstractions to derive high-level concepts and first principles from instances containing specific details. Using the concepts and principles to guide reasoning, LLMs significantly improve their abilities in following a correct reasoning path towards the solution. We conduct experiments of Step-Back Prompting with PaLM-2L, GPT-4 and Llama2-70B models, and observe substantial performance gains on various challenging reasoning-intensive tasks including STEM, Knowledge QA, and Multi-Hop Reasoning. For instance, Step-Back Prompting improves PaLM-2L performance on MMLU (Physics and Chemistry) by 7% and 11% respectively, TimeQA by 27%, and MuSiQue by 7%.

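
A minimal sketch of the two-step prompting pattern, assuming a hypothetical `llm(prompt) -> str` helper (see the linked template and cookbook for the maintained versions):

```python
# Step-Back prompting: first ask a more generic "step-back" question about the
# underlying principle, then answer the original question with that
# abstraction supplied as background context.
def step_back_answer(question, llm):
    step_back_q = llm(
        "Rewrite the question below as a more generic step-back question "
        f"about the underlying principle.\nQuestion: {question}\n"
        "Step-back question:"
    )
    principles = llm(step_back_q)          # answer the abstract question first
    return llm(
        f"Background: {principles}\n\n"
        f"Using the background above, answer: {question}"
    )
```
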
## Llama 2: Open Foundation and Fine-Tuned Chat Models

- **arXiv id:** 2307.09288v2
- **Title:** Llama 2: Open Foundation and Fine-Tuned Chat Models
- **Authors:** Hugo Touvron, Louis Martin, Kevin Stone, et al.
- **Published Date:** 2023-07-18
- **URL:** http://arxiv.org/abs/2307.09288v2
- **LangChain:**

   - **Cookbook:** [Semi_Structured_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_Structured_RAG.ipynb)

**Abstract:** In this work, we develop and release Llama 2, a collection of pretrained and fine-tuned large language models (LLMs) ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama 2-Chat, are optimized for dialogue use cases. Our models outperform open-source chat models on most benchmarks we tested, and based on our human evaluations for helpfulness and safety, may be a suitable substitute for closed-source models. We provide a detailed description of our approach to fine-tuning and safety improvements of Llama 2-Chat in order to enable the community to build on our work and contribute to the responsible development of LLMs.

## Query Rewriting for Retrieval-Augmented Large Language Models

- **arXiv id:** 2305.14283v3
- **Title:** Query Rewriting for Retrieval-Augmented Large Language Models
- **Authors:** Xinbei Ma, Yeyun Gong, Pengcheng He, et al.
- **Published Date:** 2023-05-23
- **URL:** http://arxiv.org/abs/2305.14283v3
- **LangChain:**

   - **Template:** [rewrite-retrieve-read](https://python.langchain.com/docs/templates/rewrite-retrieve-read)
   - **Cookbook:** [rewrite](https://github.com/langchain-ai/langchain/blob/master/cookbook/rewrite.ipynb)

**Abstract:** Large Language Models (LLMs) play powerful, black-box readers in the retrieve-then-read pipeline, making remarkable progress in knowledge-intensive tasks. This work introduces a new framework, Rewrite-Retrieve-Read, in place of the previous retrieve-then-read pipeline for retrieval-augmented LLMs, approached from the perspective of query rewriting. Unlike prior studies focusing on adapting either the retriever or the reader, our approach pays attention to the adaptation of the search query itself, for there is inevitably a gap between the input text and the needed knowledge in retrieval. We first prompt an LLM to generate the query, then use a web search engine to retrieve contexts. Furthermore, to better align the query to the frozen modules, we propose a trainable scheme for our pipeline. A small language model is adopted as a trainable rewriter to cater to the black-box LLM reader. The rewriter is trained using the feedback of the LLM reader by reinforcement learning. Evaluation is conducted on downstream tasks, open-domain QA and multiple-choice QA. Experimental results show consistent performance improvement, indicating that our framework proves effective and scalable, and brings a new framework for retrieval-augmented LLM.

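
The frozen-module variant (rewriting with an LLM rather than a trained rewriter) can be sketched as follows, with hypothetical `llm` and `search` helpers; the linked template and cookbook are the maintained implementations:

```python
# Rewrite-Retrieve-Read: rewrite the user input into a search query, retrieve
# context with it, then read the context to answer the original request.
def rewrite_retrieve_read(user_input, llm, search, k=5):
    query = llm(
        "Write a web search query that would help answer the request below.\n"
        f"Request: {user_input}\nQuery:"
    )
    context = "\n".join(search(query)[:k])   # retrieve-then-read as usual
    return llm(f"Context:\n{context}\n\nAnswer the request: {user_input}")
```
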
## Large Language Model Guided Tree-of-Thought

- **arXiv id:** 2305.08291v1
- **Title:** Large Language Model Guided Tree-of-Thought
- **Authors:** Jieyi Long
- **Published Date:** 2023-05-15
- **URL:** http://arxiv.org/abs/2305.08291v1
- **LangChain:**

   - **API Reference:** [langchain_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot)
   - **Cookbook:** [tree_of_thought](https://github.com/langchain-ai/langchain/blob/master/cookbook/tree_of_thought.ipynb)

**Abstract:** In this paper, we introduce the Tree-of-Thought (ToT) framework, a novel approach aimed at improving the problem-solving capabilities of auto-regressive large language models (LLMs). The ToT technique is inspired by the human mind's approach for solving complex reasoning tasks through trial and error. In this process, the human mind explores the solution space through a tree-like thought process, allowing for backtracking when necessary. To implement ToT as a software system, we augment an LLM with additional modules including a prompter agent, a checker module, a memory module, and a ToT controller. In order to solve a given problem, these modules engage in a multi-round conversation with the LLM. The memory module records the conversation and state history of the problem solving process, which allows the system to backtrack to the previous steps of the thought-process and explore other directions from there. To verify the effectiveness of the proposed technique, we implemented a ToT-based solver for the Sudoku Puzzle. Experimental results show that the ToT framework can significantly increase the success rate of Sudoku puzzle solving. Our implementation of the ToT-based Sudoku solver is available on GitHub: https://github.com/jieyilong/tree-of-thought-puzzle-solver.

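
One compact way to picture the controller is depth-first search over partial solutions with backtracking. The `propose` and `check` callables below are hypothetical stand-ins for the paper's prompter and checker modules, not the `langchain_experimental.tot` code:

```python
# Tree-of-Thought as DFS: `propose` suggests candidate next states (e.g. from
# an LLM), `check` labels a state "solved", "valid", or "invalid", and invalid
# or too-deep branches are backtracked.
def tot_solve(state, propose, check, depth=0, max_depth=10):
    verdict = check(state)
    if verdict == "solved":
        return state
    if verdict == "invalid" or depth >= max_depth:
        return None                            # dead end: backtrack
    for candidate in propose(state):
        result = tot_solve(candidate, propose, check, depth + 1, max_depth)
        if result is not None:
            return result
    return None
```
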
## Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models

- **arXiv id:** 2305.04091v3
- **Title:** Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models
- **Authors:** Lei Wang, Wanyu Xu, Yihuai Lan, et al.
- **Published Date:** 2023-05-06
- **URL:** http://arxiv.org/abs/2305.04091v3
- **LangChain:**

   - **Cookbook:** [plan_and_execute_agent](https://github.com/langchain-ai/langchain/blob/master/cookbook/plan_and_execute_agent.ipynb)

**Abstract:** Large language models (LLMs) have recently been shown to deliver impressive performance in various NLP tasks. To tackle multi-step reasoning tasks, few-shot chain-of-thought (CoT) prompting includes a few manually crafted step-by-step reasoning demonstrations which enable LLMs to explicitly generate reasoning steps and improve their reasoning task accuracy. To eliminate the manual effort, Zero-shot-CoT concatenates the target problem statement with "Let's think step by step" as an input prompt to LLMs. Despite the success of Zero-shot-CoT, it still suffers from three pitfalls: calculation errors, missing-step errors, and semantic misunderstanding errors. To address the missing-step errors, we propose Plan-and-Solve (PS) Prompting. It consists of two components: first, devising a plan to divide the entire task into smaller subtasks, and then carrying out the subtasks according to the plan. To address the calculation errors and improve the quality of generated reasoning steps, we extend PS prompting with more detailed instructions and derive PS+ prompting. We evaluate our proposed prompting strategy on ten datasets across three reasoning problems. The experimental results over GPT-3 show that our proposed zero-shot prompting consistently outperforms Zero-shot-CoT across all datasets by a large margin, is comparable to or exceeds Zero-shot-Program-of-Thought Prompting, and has comparable performance with 8-shot CoT prompting on the math reasoning problem. The code can be found at https://github.com/AGI-Edgerunners/Plan-and-Solve-Prompting.

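
A minimal zero-shot sketch, assuming a hypothetical `llm(prompt) -> str` helper; the trigger string below is paraphrased in the spirit of the PS+ prompt rather than copied verbatim from the paper:

```python
# Plan-and-Solve style prompting: one zero-shot prompt that asks the model to
# plan before executing, with PS+-style reminders about variables and careful
# intermediate calculation.
PS_PLUS_TRIGGER = (
    "Let's first understand the problem, extract the relevant variables and "
    "their values, and devise a plan. Then, let's carry out the plan, compute "
    "intermediate results step by step (paying attention to correct "
    "calculation and common sense), and show the final answer."
)


def plan_and_solve(problem, llm):
    return llm(f"Q: {problem}\nA: {PS_PLUS_TRIGGER}")
```
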
## Visual Instruction Tuning

- **arXiv id:** 2304.08485v2
- **Title:** Visual Instruction Tuning
- **Authors:** Haotian Liu, Chunyuan Li, Qingyang Wu, et al.
- **Published Date:** 2023-04-17
- **URL:** http://arxiv.org/abs/2304.08485v2
- **LangChain:**

   - **Cookbook:** [Semi_structured_and_multi_modal_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb), [Semi_structured_multi_modal_RAG_LLaMA2](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb)

**Abstract:** Instruction tuning large language models (LLMs) using machine-generated instruction-following data has improved zero-shot capabilities on new tasks, but the idea is less explored in the multimodal field. In this paper, we present the first attempt to use language-only GPT-4 to generate multimodal language-image instruction-following data. By instruction tuning on such generated data, we introduce LLaVA: Large Language and Vision Assistant, an end-to-end trained large multimodal model that connects a vision encoder and LLM for general-purpose visual and language understanding. Our early experiments show that LLaVA demonstrates impressive multimodal chat abilities, sometimes exhibiting the behaviors of multimodal GPT-4 on unseen images/instructions, and yields an 85.1% relative score compared with GPT-4 on a synthetic multimodal instruction-following dataset. When fine-tuned on Science QA, the synergy of LLaVA and GPT-4 achieves a new state-of-the-art accuracy of 92.53%. We make GPT-4 generated visual instruction tuning data, our model and code base publicly available.

## Generative Agents: Interactive Simulacra of Human Behavior

- **arXiv id:** 2304.03442v2
- **Title:** Generative Agents: Interactive Simulacra of Human Behavior
- **Authors:** Joon Sung Park, Joseph C. O'Brien, Carrie J. Cai, et al.
- **Published Date:** 2023-04-07
- **URL:** http://arxiv.org/abs/2304.03442v2
- **LangChain:**

   - **Cookbook:** [multiagent_bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb), [generative_agents_interactive_simulacra_of_human_behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb)

**Abstract:** Believable proxies of human behavior can empower interactive applications ranging from immersive environments to rehearsal spaces for interpersonal communication to prototyping tools. In this paper, we introduce generative agents--computational software agents that simulate believable human behavior. Generative agents wake up, cook breakfast, and head to work; artists paint, while authors write; they form opinions, notice each other, and initiate conversations; they remember and reflect on days past as they plan the next day. To enable generative agents, we describe an architecture that extends a large language model to store a complete record of the agent's experiences using natural language, synthesize those memories over time into higher-level reflections, and retrieve them dynamically to plan behavior. We instantiate generative agents to populate an interactive sandbox environment inspired by The Sims, where end users can interact with a small town of twenty five agents using natural language. In an evaluation, these generative agents produce believable individual and emergent social behaviors: for example, starting with only a single user-specified notion that one agent wants to throw a Valentine's Day party, the agents autonomously spread invitations to the party over the next two days, make new acquaintances, ask each other out on dates to the party, and coordinate to show up for the party together at the right time. We demonstrate through ablation that the components of our agent architecture--observation, planning, and reflection--each contribute critically to the believability of agent behavior. By fusing large language models with computational, interactive agents, this work introduces architectural and interaction patterns for enabling believable simulations of human behavior.

## CAMEL: Communicative Agents for "Mind" Exploration of Large Language Model Society

- **arXiv id:** 2303.17760v2
- **Title:** CAMEL: Communicative Agents for "Mind" Exploration of Large Language Model Society
- **Authors:** Guohao Li, Hasan Abed Al Kader Hammoud, Hani Itani, et al.
- **Published Date:** 2023-03-31
- **URL:** http://arxiv.org/abs/2303.17760v2
- **LangChain:**

   - **Cookbook:** [camel_role_playing](https://github.com/langchain-ai/langchain/blob/master/cookbook/camel_role_playing.ipynb)

**Abstract:** The rapid advancement of chat-based language models has led to remarkable progress in complex task-solving. However, their success heavily relies on human input to guide the conversation, which can be challenging and time-consuming. This paper explores the potential of building scalable techniques to facilitate autonomous cooperation among communicative agents, and provides insight into their "cognitive" processes. To address the challenges of achieving autonomous cooperation, we propose a novel communicative agent framework named role-playing. Our approach involves using inception prompting to guide chat agents toward task completion while maintaining consistency with human intentions. We showcase how role-playing can be used to generate conversational data for studying the behaviors and capabilities of a society of agents, providing a valuable resource for investigating conversational language models. In particular, we conduct comprehensive studies on instruction-following cooperation in multi-agent settings. Our contributions include introducing a novel communicative agent framework, offering a scalable approach for studying the cooperative behaviors and capabilities of multi-agent systems, and open-sourcing our library to support research on communicative agents and beyond: https://github.com/camel-ai/camel.

## HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face

- **arXiv id:** 2303.17580v4
- **Title:** HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face
- **Authors:** Yongliang Shen, Kaitao Song, Xu Tan, et al.
- **Published Date:** 2023-03-30
- **URL:** http://arxiv.org/abs/2303.17580v4
- **LangChain:**

   - **API Reference:** [langchain_experimental.autonomous_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents)
   - **Cookbook:** [hugginggpt](https://github.com/langchain-ai/langchain/blob/master/cookbook/hugginggpt.ipynb)

**Abstract:** Solving complicated AI tasks with different domains and modalities is a key step toward artificial general intelligence. While there are numerous AI models available for various domains and modalities, they cannot handle complicated AI tasks autonomously. Considering large language models (LLMs) have exhibited exceptional abilities in language understanding, generation, interaction, and reasoning, we advocate that LLMs could act as a controller to manage existing AI models to solve complicated AI tasks, with language serving as a generic interface to empower this. Based on this philosophy, we present HuggingGPT, an LLM-powered agent that leverages LLMs (e.g., ChatGPT) to connect various AI models in machine learning communities (e.g., Hugging Face) to solve AI tasks. Specifically, we use ChatGPT to conduct task planning when receiving a user request, select models according to their function descriptions available in Hugging Face, execute each subtask with the selected AI model, and summarize the response according to the execution results. By leveraging the strong language capability of ChatGPT and abundant AI models in Hugging Face, HuggingGPT can tackle a wide range of sophisticated AI tasks spanning different modalities and domains and achieve impressive results in language, vision, speech, and other challenging tasks, which paves a new way towards the realization of artificial general intelligence.

## GPT-4 Technical Report

- **arXiv id:** 2303.08774v6
- **Title:** GPT-4 Technical Report
- **Authors:** OpenAI, Josh Achiam, Steven Adler, et al.
- **Published Date:** 2023-03-15
- **URL:** http://arxiv.org/abs/2303.08774v6
- **LangChain:**

   - **Documentation:** [docs/integrations/vectorstores/mongodb_atlas](https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas)

**Abstract:** We report the development of GPT-4, a large-scale, multimodal model which can accept image and text inputs and produce text outputs. While less capable than humans in many real-world scenarios, GPT-4 exhibits human-level performance on various professional and academic benchmarks, including passing a simulated bar exam with a score around the top 10% of test takers. GPT-4 is a Transformer-based model pre-trained to predict the next token in a document. The post-training alignment process results in improved performance on measures of factuality and adherence to desired behavior. A core component of this project was developing infrastructure and optimization methods that behave predictably across a wide range of scales. This allowed us to accurately predict some aspects of GPT-4's performance based on models trained with no more than 1/1,000th the compute of GPT-4.

## A Watermark for Large Language Models

- **arXiv id:** 2301.10226v4
- **Title:** A Watermark for Large Language Models
- **Authors:** John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al.
- **Published Date:** 2023-01-24
- **URL:** http://arxiv.org/abs/2301.10226v4
- **LangChain:**

   - **API Reference:** [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)

**Abstract:** Potential harms of large language models can be mitigated by watermarking model output, i.e., embedding signals into generated text that are invisible to humans but algorithmically detectable from a short span of tokens. We propose a watermarking framework for proprietary language models. The watermark can be embedded with negligible impact on text quality, and can be detected using an efficient open-source algorithm without access to the language model API or parameters. The watermark works by selecting a randomized set of "green" tokens before a word is generated, and then softly promoting use of green tokens during sampling. We propose a statistical test for detecting the watermark with interpretable p-values, and derive an information-theoretic framework for analyzing the sensitivity of the watermark. We test the watermark using a multi-billion parameter model from the Open Pretrained Transformer (OPT) family, and discuss robustness and security.

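
A toy version of the green-list scheme helps make the mechanism concrete. It is illustrative only, not the authors' released implementation (real versions hash with a keyed PRF and operate on model logits inside the sampling loop):

```python
# Toy green-list watermark: the previous token seeds a PRNG that splits the
# vocabulary into "green" and "red" halves, green-token logits get a small
# positive bias before sampling, and detection counts green tokens with a
# one-proportion z-score.
import random


def green_ids(prev_token_id, vocab_size, gamma=0.5):
    rng = random.Random(prev_token_id)           # seeded by the previous token
    ids = list(range(vocab_size))
    rng.shuffle(ids)
    return set(ids[: int(gamma * vocab_size)])


def watermark_logits(logits, prev_token_id, delta=2.0, gamma=0.5):
    greens = green_ids(prev_token_id, len(logits), gamma)
    return [x + delta if i in greens else x for i, x in enumerate(logits)]


def detect(token_ids, vocab_size, gamma=0.5):
    hits = sum(
        1 for prev, tok in zip(token_ids, token_ids[1:])
        if tok in green_ids(prev, vocab_size, gamma)
    )
    n = max(len(token_ids) - 1, 1)
    return (hits - gamma * n) / (gamma * (1 - gamma) * n) ** 0.5   # z-score
```
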
## Precise Zero-Shot Dense Retrieval without Relevance Labels

- **arXiv id:** 2212.10496v1
- **Title:** Precise Zero-Shot Dense Retrieval without Relevance Labels
- **Authors:** Luyu Gao, Xueguang Ma, Jimmy Lin, et al.
- **Published Date:** 2022-12-20
- **URL:** http://arxiv.org/abs/2212.10496v1
- **LangChain:**

   - **API Reference:** [langchain...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder)
   - **Template:** [hyde](https://python.langchain.com/docs/templates/hyde)
   - **Cookbook:** [hypothetical_document_embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb)

**Abstract:** While dense retrieval has been shown effective and efficient across tasks and languages, it remains difficult to create effective fully zero-shot dense retrieval systems when no relevance label is available. In this paper, we recognize the difficulty of zero-shot learning and encoding relevance. Instead, we propose to pivot through Hypothetical Document Embeddings (HyDE). Given a query, HyDE first zero-shot instructs an instruction-following language model (e.g. InstructGPT) to generate a hypothetical document. The document captures relevance patterns but is unreal and may contain false details. Then, an unsupervised contrastively learned encoder (e.g. Contriever) encodes the document into an embedding vector. This vector identifies a neighborhood in the corpus embedding space, where similar real documents are retrieved based on vector similarity. This second step grounds the generated document to the actual corpus, with the encoder's dense bottleneck filtering out the incorrect details. Our experiments show that HyDE significantly outperforms the state-of-the-art unsupervised dense retriever Contriever and shows strong performance comparable to fine-tuned retrievers, across various tasks (e.g. web search, QA, fact verification) and languages (e.g. sw, ko, ja).

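
The two-step pivot is easy to sketch with hypothetical `llm` and `embed` helpers and a plain cosine similarity; the `HypotheticalDocumentEmbedder` chain linked above is the maintained implementation:

```python
# HyDE-style retrieval: generate a hypothetical answer document, embed it, and
# retrieve real documents nearest to that embedding (instead of embedding the
# query itself). `corpus_vectors` are precomputed embeddings of `corpus`.
import math


def cosine(a, b):
    dot = sum(x * y for x, y in zip(a, b))
    norm = math.sqrt(sum(x * x for x in a)) * math.sqrt(sum(y * y for y in b))
    return dot / norm


def hyde_retrieve(query, corpus, corpus_vectors, llm, embed, k=4):
    fake_doc = llm(f"Write a short passage that answers: {query}")
    query_vec = embed(fake_doc)            # embed the hypothetical document
    ranked = sorted(
        range(len(corpus)),
        key=lambda i: cosine(query_vec, corpus_vectors[i]),
        reverse=True,
    )
    return [corpus[i] for i in ranked[:k]]
```
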
## Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments

- **arXiv id:** 2212.07425v3
- **Title:** Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments
- **Authors:** Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al.
- **Published Date:** 2022-12-12
- **URL:** http://arxiv.org/abs/2212.07425v3
- **LangChain:**

   - **API Reference:** [langchain_experimental.fallacy_removal](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.fallacy_removal)

**Abstract:** The spread of misinformation, propaganda, and flawed argumentation has been amplified in the Internet era. Given the volume of data and the subtlety of identifying violations of argumentation norms, supporting information analytics tasks, like content moderation, with trustworthy methods that can identify logical fallacies is essential. In this paper, we formalize prior theoretical work on logical fallacies into a comprehensive three-stage evaluation framework of detection, coarse-grained, and fine-grained classification. We adapt existing evaluation datasets for each stage of the evaluation. We employ three families of robust and explainable methods based on prototype reasoning, instance-based reasoning, and knowledge injection. The methods combine language models with background knowledge and explainable mechanisms. Moreover, we address data sparsity with strategies for data augmentation and curriculum learning. Our three-stage framework natively consolidates prior datasets and methods from existing tasks, like propaganda detection, serving as an overarching evaluation testbed. We extensively evaluate these methods on our datasets, focusing on their robustness and explainability. Our results provide insight into the strengths and weaknesses of the methods on different components and fallacy classes, indicating that fallacy identification is a challenging task that may require specialized forms of reasoning to capture various classes. We share our open-source code and data on GitHub to support further work on logical fallacy identification.

## Complementary Explanations for Effective In-Context Learning

- **arXiv id:** 2211.13892v2
- **Title:** Complementary Explanations for Effective In-Context Learning
- **Authors:** Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al.
- **Published Date:** 2022-11-25
- **URL:** http://arxiv.org/abs/2211.13892v2
- **LangChain:**

   - **API Reference:** [langchain_core...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)

**Abstract:** Large language models (LLMs) have exhibited remarkable capabilities in learning from explanations in prompts, but there has been limited understanding of exactly how these explanations function or why they are effective. This work aims to better understand the mechanisms by which explanations are used for in-context learning. We first study the impact of two different factors on the performance of prompts with explanations: the computation trace (the way the solution is decomposed) and the natural language used to express the prompt. By perturbing explanations on three controlled tasks, we show that both factors contribute to the effectiveness of explanations. We further study how to form maximally effective sets of explanations for solving a given test query. We find that LLMs can benefit from the complementarity of the explanation set: diverse reasoning skills shown by different exemplars can lead to better performance. Therefore, we propose a maximal marginal relevance-based exemplar selection approach for constructing exemplar sets that are both relevant as well as complementary, which successfully improves the in-context learning performance across three real-world tasks on multiple LLMs.

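
The selection rule is ordinary maximal marginal relevance applied to exemplar embeddings: greedily pick examples that are similar to the query but dissimilar to those already chosen, trading the two off with `lam`. A dependency-free sketch (precomputed vectors and a caller-supplied `similarity` function are assumed; this is not the `MaxMarginalRelevanceExampleSelector` source):

```python
# Greedy MMR selection over few-shot exemplars: score = relevance to the query
# minus redundancy with already-selected exemplars.
def mmr_select(query_vec, example_vecs, similarity, k=4, lam=0.5):
    selected = []
    remaining = list(range(len(example_vecs)))
    while remaining and len(selected) < k:
        def score(i):
            relevance = similarity(query_vec, example_vecs[i])
            redundancy = max(
                (similarity(example_vecs[i], example_vecs[j]) for j in selected),
                default=0.0,
            )
            return lam * relevance - (1 - lam) * redundancy
        best = max(remaining, key=score)
        selected.append(best)
        remaining.remove(best)
    return selected          # indices of the chosen few-shot exemplars
```
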
## PAL: Program-aided Language Models

- **arXiv id:** 2211.10435v2
- **Title:** PAL: Program-aided Language Models
- **Authors:** Luyu Gao, Aman Madaan, Shuyan Zhou, et al.
- **Published Date:** 2022-11-18
- **URL:** http://arxiv.org/abs/2211.10435v2
- **LangChain:**

   - **API Reference:** [langchain_experimental...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), [langchain_experimental.pal_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain)
   - **Cookbook:** [program_aided_language_model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb)

**Abstract:** Large language models (LLMs) have recently demonstrated an impressive ability to perform arithmetic and symbolic reasoning tasks, when provided with a few examples at test time ("few-shot prompting"). Much of this success can be attributed to prompting methods such as "chain-of-thought", which employ LLMs for both understanding the problem description by decomposing it into steps, as well as solving each step of the problem. While LLMs seem to be adept at this sort of step-by-step decomposition, LLMs often make logical and arithmetic mistakes in the solution part, even when the problem is decomposed correctly. In this paper, we present Program-Aided Language models (PAL): a novel approach that uses the LLM to read natural language problems and generate programs as the intermediate reasoning steps, but offloads the solution step to a runtime such as a Python interpreter. With PAL, decomposing the natural language problem into runnable steps remains the only learning task for the LLM, while solving is delegated to the interpreter. We demonstrate this synergy between a neural LLM and a symbolic interpreter across 13 mathematical, symbolic, and algorithmic reasoning tasks from BIG-Bench Hard and other benchmarks. In all these natural language reasoning tasks, generating code using an LLM and reasoning using a Python interpreter leads to more accurate results than much larger models. For example, PAL using Codex achieves state-of-the-art few-shot accuracy on the GSM8K benchmark of math word problems, surpassing PaLM-540B which uses chain-of-thought by absolute 15% top-1. Our code and data are publicly available at http://reasonwithpal.com/.

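
The division of labour is easy to sketch with a hypothetical `llm(prompt) -> str` helper: the model writes a small program as its reasoning trace, and the Python runtime (not the LLM) produces the final answer. `exec` on model output is shown for illustration only and would need sandboxing in practice, which is also why `PALChain` is gated behind `langchain_experimental`:

```python
# Program-aided solving: the LLM emits a solution() function, the interpreter
# executes it and returns the computed result.
def pal_answer(question, llm):
    program = llm(
        "Write a Python function solution() that computes the numeric answer "
        f"to the following problem and returns it.\nProblem: {question}"
    )
    namespace = {}
    exec(program, namespace)       # offload the calculation to the interpreter
    return namespace["solution"]()
```
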
## ReAct: Synergizing Reasoning and Acting in Language Models

- **arXiv id:** 2210.03629v3
- **Title:** ReAct: Synergizing Reasoning and Acting in Language Models
- **Authors:** Shunyu Yao, Jeffrey Zhao, Dian Yu, et al.
- **Published Date:** 2022-10-06
- **URL:** http://arxiv.org/abs/2210.03629v3
- **LangChain:**

   - **Documentation:** [docs/integrations/providers/cohere](https://python.langchain.com/docs/integrations/providers/cohere), [docs/integrations/chat/huggingface](https://python.langchain.com/docs/integrations/chat/huggingface), [docs/integrations/tools/ionic_shopping](https://python.langchain.com/docs/integrations/tools/ionic_shopping)
   - **API Reference:** [langchain...create_react_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent), [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain)

**Abstract:** While large language models (LLMs) have demonstrated impressive capabilities across tasks in language understanding and interactive decision making, their abilities for reasoning (e.g. chain-of-thought prompting) and acting (e.g. action plan generation) have primarily been studied as separate topics. In this paper, we explore the use of LLMs to generate both reasoning traces and task-specific actions in an interleaved manner, allowing for greater synergy between the two: reasoning traces help the model induce, track, and update action plans as well as handle exceptions, while actions allow it to interface with external sources, such as knowledge bases or environments, to gather additional information. We apply our approach, named ReAct, to a diverse set of language and decision making tasks and demonstrate its effectiveness over state-of-the-art baselines, as well as improved human interpretability and trustworthiness over methods without reasoning or acting components. Concretely, on question answering (HotpotQA) and fact verification (Fever), ReAct overcomes issues of hallucination and error propagation prevalent in chain-of-thought reasoning by interacting with a simple Wikipedia API, and generates human-like task-solving trajectories that are more interpretable than baselines without reasoning traces. On two interactive decision making benchmarks (ALFWorld and WebShop), ReAct outperforms imitation and reinforcement learning methods by an absolute success rate of 34% and 10% respectively, while being prompted with only one or two in-context examples. Project site with code: https://react-lm.github.io

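
The interleaving can be sketched as a loop that alternates model Thought/Action output with tool Observation lines, assuming a hypothetical `llm(prompt) -> str` helper and a dict of tool callables. Parsing here is deliberately simplistic; `create_react_agent` (linked above) is the maintained implementation:

```python
# ReAct-style loop: the model emits Thought/Action text, the named tool is
# called on the action argument, and the Observation is appended to the
# transcript until a "Final Answer:" appears.
def react(question, llm, tools, max_steps=5):
    transcript = f"Question: {question}\n"
    for _ in range(max_steps):
        step = llm(transcript + "Thought:")
        transcript += f"Thought:{step}\n"
        if "Final Answer:" in step:
            return step.split("Final Answer:", 1)[1].strip()
        if "Action:" in step:
            action_text = step.split("Action:", 1)[1].strip()
            name, _, arg = action_text.split("\n", 1)[0].partition(" ")
            observation = tools.get(name, lambda a: "unknown tool")(arg)
            transcript += f"Observation: {observation}\n"
    return transcript              # fell through without a final answer
```
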
## Deep Lake: a Lakehouse for Deep Learning

- **arXiv id:** 2209.10785v2
- **Title:** Deep Lake: a Lakehouse for Deep Learning
- **Authors:** Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al.
- **Published Date:** 2022-09-22
- **URL:** http://arxiv.org/abs/2209.10785v2
- **LangChain:**

   - **Documentation:** [docs/integrations/providers/activeloop_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake)

**Abstract:** Traditional data lakes provide critical data infrastructure for analytical workloads by enabling time travel, running SQL queries, ingesting data with ACID transactions, and visualizing petabyte-scale datasets on cloud storage. They allow organizations to break down data silos, unlock data-driven decision-making, improve operational efficiency, and reduce costs. However, as deep learning usage increases, traditional data lakes are not well-designed for applications such as natural language processing (NLP), audio processing, computer vision, and applications involving non-tabular datasets. This paper presents Deep Lake, an open-source lakehouse for deep learning applications developed at Activeloop. Deep Lake maintains the benefits of a vanilla data lake with one key difference: it stores complex data, such as images, videos, annotations, as well as tabular data, in the form of tensors and rapidly streams the data over the network to (a) Tensor Query Language, (b) in-browser visualization engine, or (c) deep learning frameworks without sacrificing GPU utilization. Datasets stored in Deep Lake can be accessed from PyTorch, TensorFlow, JAX, and integrate with numerous MLOps tools.

## Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages

- **arXiv id:** 2205.12654v1
- **Title:** Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages
- **Authors:** Kevin Heffernan, Onur Çelebi, Holger Schwenk
- **Published Date:** 2022-05-25
- **URL:** http://arxiv.org/abs/2205.12654v1
- **LangChain:**

- **API Reference:** [langchain_community...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)

**Abstract:** Scaling multilingual representation learning beyond the hundred most frequent
languages is challenging, in particular to cover the long tail of low-resource
languages. A promising approach has been to train one-for-all multilingual
models capable of cross-lingual transfer, but these models often suffer from
insufficient capacity and interference between unrelated languages. Instead, we
move away from this approach and focus on training multiple language (family)
specific representations, but most prominently enable all languages to still be
encoded in the same representational space. To achieve this, we focus on
teacher-student training, allowing all encoders to be mutually compatible for
bitext mining, and enabling fast learning of new languages. We introduce a new
teacher-student training scheme which combines supervised and self-supervised
training, allowing encoders to take advantage of monolingual training data,
which is valuable in the low-resource setting.
Our approach significantly outperforms the original LASER encoder. We study
very low-resource languages and handle 50 African languages, many of which are
not covered by any other model. For these languages, we train sentence
encoders, mine bitexts, and validate the bitexts by training NMT systems.

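The `LaserEmbeddings` wrapper referenced above exposes these encoders in LangChain. A minimal sketch (assumes the `laser_encoders` dependency is installed; the language code is illustrative and the exact parameter names may vary by version):

```python
from langchain_community.embeddings import LaserEmbeddings

embedder = LaserEmbeddings(lang="eng_Latn")  # language code for the LASER tokenizer
doc_vectors = embedder.embed_documents(
    ["Hello, world!", "LASER covers many low-resource languages."]
)
query_vector = embedder.embed_query("Hello, world!")
print(len(query_vector))  # dimensionality of the sentence embedding
```
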
## Evaluating the Text-to-SQL Capabilities of Large Language Models

- **arXiv id:** 2204.00498v1
- **Title:** Evaluating the Text-to-SQL Capabilities of Large Language Models
- **Authors:** Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau
- **Published Date:** 2022-03-15
- **URL:** http://arxiv.org/abs/2204.00498v1
- **LangChain:**

- **API Reference:** [langchain_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL), [langchain_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase)

**Abstract:** We perform an empirical evaluation of Text-to-SQL capabilities of the Codex
language model. We find that, without any finetuning, Codex is a strong
baseline on the Spider benchmark; we also analyze the failure modes of Codex in
this setting. Furthermore, we demonstrate on the GeoQuery and Scholar
benchmarks that a small number of in-domain examples provided in the prompt
enables Codex to perform better than state-of-the-art models finetuned on such
few-shot examples.

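The `SQLDatabase` utility referenced above is the building block LangChain's text-to-SQL chains hand to the model. A minimal sketch (the SQLite file is a hypothetical local database):

```python
from langchain_community.utilities import SQLDatabase

db = SQLDatabase.from_uri("sqlite:///example.db")  # hypothetical local database
print(db.dialect)                    # e.g. "sqlite"
print(db.get_usable_table_names())   # schema the prompt can describe to the model
print(db.run("SELECT 1;"))           # execute a generated query
```
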
## Locally Typical Sampling

- **arXiv id:** 2202.00666v5
- **Title:** Locally Typical Sampling
- **Authors:** Clara Meister, Tiago Pimentel, Gian Wiher, et al.
- **Published Date:** 2022-02-01
- **URL:** http://arxiv.org/abs/2202.00666v5
- **LangChain:**

- **API Reference:** [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)

**Abstract:** Today's probabilistic language generators fall short when it comes to
producing coherent and fluent text despite the fact that the underlying models
perform well under standard metrics, e.g., perplexity. This discrepancy has
puzzled the language generation community for the last few years. In this work,
we posit that the abstraction of natural language generation as a discrete
stochastic process--which allows for an information-theoretic analysis--can
provide new insights into the behavior of probabilistic language generators,
e.g., why high-probability texts can be dull or repetitive. Humans use language
as a means of communicating information, aiming to do so in a simultaneously
efficient and error-minimizing manner; in fact, psycholinguistics research
suggests humans choose each word in a string with this subconscious goal in
mind. We formally define the set of strings that meet this criterion: those for
which each word has an information content close to the expected information
content, i.e., the conditional entropy of our model. We then propose a simple
and efficient procedure for enforcing this criterion when generating from
probabilistic models, which we call locally typical sampling. Automatic and
human evaluations show that, in comparison to nucleus and top-k sampling,
locally typical sampling offers competitive performance (in both abstractive
summarization and story generation) in terms of quality while consistently
reducing degenerate repetitions.

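Locally typical sampling is exposed through the `typical_p` parameter of the Hugging Face wrappers referenced above. A hedged sketch (assumes a Hugging Face API token; the repo id is illustrative):

```python
from langchain_huggingface import HuggingFaceEndpoint

llm = HuggingFaceEndpoint(
    repo_id="HuggingFaceH4/zephyr-7b-beta",  # illustrative text-generation repo
    do_sample=True,
    typical_p=0.95,        # locally typical sampling threshold
    max_new_tokens=128,
)
print(llm.invoke("Explain locally typical sampling in one sentence."))
```
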
## Learning Transferable Visual Models From Natural Language Supervision

- **arXiv id:** 2103.00020v1
- **Title:** Learning Transferable Visual Models From Natural Language Supervision
- **Authors:** Alec Radford, Jong Wook Kim, Chris Hallacy, et al.
- **Published Date:** 2021-02-26
- **URL:** http://arxiv.org/abs/2103.00020v1
- **LangChain:**

- **API Reference:** [langchain_experimental.open_clip](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.open_clip)

**Abstract:** State-of-the-art computer vision systems are trained to predict a fixed set
of predetermined object categories. This restricted form of supervision limits
their generality and usability since additional labeled data is needed to
specify any other visual concept. Learning directly from raw text about images
is a promising alternative which leverages a much broader source of
supervision. We demonstrate that the simple pre-training task of predicting
which caption goes with which image is an efficient and scalable way to learn
SOTA image representations from scratch on a dataset of 400 million (image,
text) pairs collected from the internet. After pre-training, natural language
is used to reference learned visual concepts (or describe new ones) enabling
zero-shot transfer of the model to downstream tasks. We study the performance
of this approach by benchmarking on over 30 different existing computer vision
datasets, spanning tasks such as OCR, action recognition in videos,
geo-localization, and many types of fine-grained object classification. The
model transfers non-trivially to most tasks and is often competitive with a
fully supervised baseline without the need for any dataset specific training.
For instance, we match the accuracy of the original ResNet-50 on ImageNet
zero-shot without needing to use any of the 1.28 million training examples it
was trained on. We release our code and pre-trained model weights at
https://github.com/OpenAI/CLIP.

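The `open_clip` module referenced above wraps OpenCLIP models as a LangChain embedding class, so text and images can share one vector space. A minimal sketch (requires `open_clip_torch` and `pillow`; the image path is hypothetical):

```python
from langchain_experimental.open_clip import OpenCLIPEmbeddings

clip = OpenCLIPEmbeddings()  # downloads a default OpenCLIP checkpoint on first use
text_vectors = clip.embed_documents(["a photo of a cat", "a photo of a dog"])
image_vectors = clip.embed_image(["photos/cat.jpg"])  # hypothetical local file
```
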
## CTRL: A Conditional Transformer Language Model for Controllable Generation

- **arXiv id:** 1909.05858v2
- **Title:** CTRL: A Conditional Transformer Language Model for Controllable Generation
- **Authors:** Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al.
- **Published Date:** 2019-09-11
- **URL:** http://arxiv.org/abs/1909.05858v2
- **LangChain:**

- **API Reference:** [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)

**Abstract:** Large-scale language models show promising text generation capabilities, but
users cannot easily control particular aspects of the generated text. We
release CTRL, a 1.63 billion-parameter conditional transformer language model,
trained to condition on control codes that govern style, content, and
task-specific behavior. Control codes were derived from structure that
naturally co-occurs with raw text, preserving the advantages of unsupervised
learning while providing more explicit control over text generation. These
codes also allow CTRL to predict which parts of the training data are most
likely given a sequence. This provides a potential method for analyzing large
amounts of data via model-based source attribution. We have released multiple
full-sized, pretrained versions of CTRL at https://github.com/salesforce/ctrl.

## Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks

- **arXiv id:** 1908.10084v1
- **Title:** Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks
- **Authors:** Nils Reimers, Iryna Gurevych
- **Published Date:** 2019-08-27
- **URL:** http://arxiv.org/abs/1908.10084v1
- **LangChain:**

- **Documentation:** [docs/integrations/text_embedding/sentence_transformers](https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers)

**Abstract:** BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) has set a new
state-of-the-art performance on sentence-pair regression tasks like semantic
textual similarity (STS). However, it requires that both sentences are fed into
the network, which causes a massive computational overhead: Finding the most
similar pair in a collection of 10,000 sentences requires about 50 million
inference computations (~65 hours) with BERT. The construction of BERT makes it
unsuitable for semantic similarity search as well as for unsupervised tasks
like clustering.
In this publication, we present Sentence-BERT (SBERT), a modification of the
pretrained BERT network that use siamese and triplet network structures to
derive semantically meaningful sentence embeddings that can be compared using
cosine-similarity. This reduces the effort for finding the most similar pair
from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while
maintaining the accuracy from BERT.
We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning
tasks, where it outperforms other state-of-the-art sentence embeddings methods.

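Sentence-Transformers models are used in LangChain through the embeddings wrapper covered by the documentation page above. A minimal sketch (requires the `sentence-transformers` package; the model name is illustrative and downloads on first use):

```python
from langchain_community.embeddings import HuggingFaceEmbeddings

embedder = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
vectors = embedder.embed_documents(
    ["SBERT derives sentence embeddings.", "They can be compared with cosine similarity."]
)
query_vector = embedder.embed_query("How are sentences compared?")
```
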
@@ -241,6 +241,7 @@ Dependents stats for `langchain-ai/langchain`

|[alejandro-ao/langchain-ask-pdf](https://github.com/alejandro-ao/langchain-ask-pdf) | 514 |
|[sajjadium/ctf-archives](https://github.com/sajjadium/ctf-archives) | 507 |
|[continuum-llms/chatgpt-memory](https://github.com/continuum-llms/chatgpt-memory) | 502 |
|[llmOS/opencopilot](https://github.com/llmOS/opencopilot) | 495 |
|[steamship-core/steamship-langchain](https://github.com/steamship-core/steamship-langchain) | 494 |
|[mpaepper/content-chatbot](https://github.com/mpaepper/content-chatbot) | 493 |
|[langchain-ai/langchain-aiplugin](https://github.com/langchain-ai/langchain-aiplugin) | 492 |

@@ -454,6 +455,7 @@ Dependents stats for `langchain-ai/langchain`

|[Teahouse-Studios/akari-bot](https://github.com/Teahouse-Studios/akari-bot) | 149 |
|[realminchoi/babyagi-ui](https://github.com/realminchoi/babyagi-ui) | 148 |
|[ssheng/BentoChain](https://github.com/ssheng/BentoChain) | 148 |
|[lmstudio-ai/examples](https://github.com/lmstudio-ai/examples) | 147 |
|[solana-labs/chatgpt-plugin](https://github.com/solana-labs/chatgpt-plugin) | 147 |
|[aurelio-labs/arxiv-bot](https://github.com/aurelio-labs/arxiv-bot) | 147 |
|[Jaseci-Labs/jaseci](https://github.com/Jaseci-Labs/jaseci) | 146 |

@@ -1,38 +1,40 @@

# 3rd Party Tutorials
# Tutorials

## Books and Handbooks

- [Generative AI with LangChain](https://www.amazon.com/Generative-AI-LangChain-language-ChatGPT/dp/1835083463/ref=sr_1_1?crid=1GMOMH0G7GLR&keywords=generative+ai+with+langchain&qid=1703247181&sprefix=%2Caps%2C298&sr=8-1) by [Ben Auffarth](https://www.amazon.com/stores/Ben-Auffarth/author/B08JQKSZ7D?ref=ap_rdr&store_ref=ap_rdr&isDramIntegrated=true&shoppingPortalEnabled=true), ©️ 2023 Packt Publishing
- [LangChain AI Handbook](https://www.pinecone.io/learn/langchain/) By **James Briggs** and **Francisco Ingham**
- [LangChain Cheatsheet](https://pub.towardsai.net/langchain-cheatsheet-all-secrets-on-a-single-page-8be26b721cde) by **Ivan Reznikov**

## Tutorials

### [LangChain v 0.1 by LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae0gBSJ9T0w7cu7iJZbH3T31)
### [Build with Langchain - Advanced by LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae06tclDATrMYY0idsTdLg9v)
### [LangGraph by LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae16n2TWUkKq5PgJ0w6Pkwtg)
### [by Greg Kamradt](https://www.youtube.com/playlist?list=PLqZXAkvF1bPNQER9mLmDbntNfSpzdDIU5)
### [by Sam Witteveen](https://www.youtube.com/playlist?list=PL8motc6AQftk1Bs42EW45kwYbyJ4jOdiZ)
### [by James Briggs](https://www.youtube.com/playlist?list=PLIUOU7oqGTLieV9uTIFMm6_4PXg-hlN6F)
### [by Prompt Engineering](https://www.youtube.com/playlist?list=PLVEEucA9MYhOu89CX8H3MBZqayTbcCTMr)
### [by Mayo Oshin](https://www.youtube.com/@chatwithdata/search?query=langchain)
### [by 1 little Coder](https://www.youtube.com/playlist?list=PLpdmBGJ6ELUK-v0MK-t4wZmVEbxM5xk6L)
### [by BobLin (Chinese language)](https://www.youtube.com/playlist?list=PLbd7ntv6PxC3QMFQvtWfk55p-Op_syO1C)

## Courses

### Featured courses on Deeplearning.AI

- [LangChain for LLM Application Development](https://www.deeplearning.ai/short-courses/langchain-for-llm-application-development/)
- [LangChain Chat with Your Data](https://www.deeplearning.ai/short-courses/langchain-chat-with-your-data/)
- [Functions, Tools and Agents with LangChain](https://www.deeplearning.ai/short-courses/functions-tools-agents-langchain/)
- [Build LLM Apps with LangChain.js](https://www.deeplearning.ai/short-courses/build-llm-apps-with-langchain-js/)
- [LangChain for LLM Application Development](https://learn.deeplearning.ai/langchain)
- [LangChain Chat with Your Data](https://learn.deeplearning.ai/langchain-chat-with-your-data)
- [Functions, Tools and Agents with LangChain](https://learn.deeplearning.ai/functions-tools-agents-langchain)
- [Build LLM Apps with LangChain.js](https://learn.deeplearning.ai/courses/build-llm-apps-with-langchain-js)

### Online courses

- [Udemy](https://www.udemy.com/courses/search/?q=langchain)
- [DataCamp](https://www.datacamp.com/courses/developing-llm-applications-with-langchain)
- [Pluralsight](https://www.pluralsight.com/search?q=langchain)
- [Coursera](https://www.coursera.org/search?query=langchain)
- [Maven](https://maven.com/courses?query=langchain)
- [Udacity](https://www.udacity.com/catalog/all/any-price/any-school/any-skill/any-difficulty/any-duration/any-type/relevance/page-1?searchValue=langchain)
- [LinkedIn Learning](https://www.linkedin.com/search/results/learning/?keywords=langchain)
- [edX](https://www.edx.org/search?q=langchain)
- [freeCodeCamp](https://www.youtube.com/@freecodecamp/search?query=langchain)

## Short Tutorials

@@ -41,11 +43,8 @@

- [by Rabbitmetrics](https://youtu.be/aywZrzNaKjs)
- [by Ivan Reznikov](https://medium.com/@ivanreznikov/langchain-101-course-updated-668f7b41d6cb)

## Books and Handbooks

- [Generative AI with LangChain](https://www.amazon.com/Generative-AI-LangChain-language-ChatGPT/dp/1835083463/ref=sr_1_1?crid=1GMOMH0G7GLR&keywords=generative+ai+with+langchain&qid=1703247181&sprefix=%2Caps%2C298&sr=8-1) by [Ben Auffarth](https://www.amazon.com/stores/Ben-Auffarth/author/B08JQKSZ7D?ref=ap_rdr&store_ref=ap_rdr&isDramIntegrated=true&shoppingPortalEnabled=true), ©️ 2023 Packt Publishing
- [LangChain AI Handbook](https://www.pinecone.io/learn/langchain/) By **James Briggs** and **Francisco Ingham**
- [LangChain Cheatsheet](https://pub.towardsai.net/langchain-cheatsheet-all-secrets-on-a-single-page-8be26b721cde) by **Ivan Reznikov**
- [Dive into Langchain (Chinese language)](https://langchain.boblin.app/)
## [Documentation: Use cases](/docs/use_cases)

---------------------

@@ -1,63 +1,137 @@

# YouTube videos

[Updated 2024-05-16]
⛓ icon marks a new addition [last update 2023-09-21]

### [Official LangChain YouTube channel](https://www.youtube.com/@LangChain)

### [Tutorials on YouTube](/docs/additional_resources/tutorials/#tutorials)
### Introduction to LangChain with Harrison Chase, creator of LangChain
- [Building the Future with LLMs, `LangChain`, & `Pinecone`](https://youtu.be/nMniwlGyX-c) by [Pinecone](https://www.youtube.com/@pinecone-io)
- [LangChain and Weaviate with Harrison Chase and Bob van Luijt - Weaviate Podcast #36](https://youtu.be/lhby7Ql7hbk) by [Weaviate • Vector Database](https://www.youtube.com/@Weaviate)
- [LangChain Demo + Q&A with Harrison Chase](https://youtu.be/zaYTXQFR0_s?t=788) by [Full Stack Deep Learning](https://www.youtube.com/@FullStackDeepLearning)
- [LangChain Agents: Build Personal Assistants For Your Data (Q&A with Harrison Chase and Mayo Oshin)](https://youtu.be/gVkF8cwfBLI) by [Chat with data](https://www.youtube.com/@chatwithdata)

## Videos (sorted by views)

Only videos with 40K+ views:
- [Using `ChatGPT` with YOUR OWN Data. This is magical. (LangChain OpenAI API)](https://youtu.be/9AXP7tCI9PI) by [TechLead](https://www.youtube.com/@TechLead)
- [First look - `ChatGPT` + `WolframAlpha` (`GPT-3.5` and Wolfram|Alpha via LangChain by James Weaver)](https://youtu.be/wYGbY811oMo) by [Dr Alan D. Thompson](https://www.youtube.com/@DrAlanDThompson)
- [LangChain explained - The hottest new Python framework](https://youtu.be/RoR4XJw8wIc) by [AssemblyAI](https://www.youtube.com/@AssemblyAI)
- [Chatbot with INFINITE MEMORY using `OpenAI` & `Pinecone` - `GPT-3`, `Embeddings`, `ADA`, `Vector DB`, `Semantic`](https://youtu.be/2xNzB7xq8nk) by [David Shapiro ~ AI](https://www.youtube.com/@DavidShapiroAutomator)
- [LangChain for LLMs is... basically just an Ansible playbook](https://youtu.be/X51N9C-OhlE) by [David Shapiro ~ AI](https://www.youtube.com/@DavidShapiroAutomator)
- [Build your own LLM Apps with LangChain & `GPT-Index`](https://youtu.be/-75p09zFUJY) by [1littlecoder](https://www.youtube.com/@1littlecoder)
- [`BabyAGI` - New System of Autonomous AI Agents with LangChain](https://youtu.be/lg3kJvf1kXo) by [1littlecoder](https://www.youtube.com/@1littlecoder)
- [Run `BabyAGI` with Langchain Agents (with Python Code)](https://youtu.be/WosPGHPObx8) by [1littlecoder](https://www.youtube.com/@1littlecoder)
- [How to Use Langchain With `Zapier` | Write and Send Email with GPT-3 | OpenAI API Tutorial](https://youtu.be/p9v2-xEa9A0) by [StarMorph AI](https://www.youtube.com/@starmorph)
- [Use Your Locally Stored Files To Get Response From GPT - `OpenAI` | Langchain | Python](https://youtu.be/NC1Ni9KS-rk) by [Shweta Lodha](https://www.youtube.com/@shweta-lodha)
- [`Langchain JS` | How to Use GPT-3, GPT-4 to Reference your own Data | `OpenAI Embeddings` Intro](https://youtu.be/veV2I-NEjaM) by [StarMorph AI](https://www.youtube.com/@starmorph)
- [The easiest way to work with large language models | Learn LangChain in 10min](https://youtu.be/kmbS6FDQh7c) by [Sophia Yang](https://www.youtube.com/@SophiaYangDS)
- [4 Autonomous AI Agents: “Westworld” simulation `BabyAGI`, `AutoGPT`, `Camel`, `LangChain`](https://youtu.be/yWbnH6inT_U) by [Sophia Yang](https://www.youtube.com/@SophiaYangDS)
- [AI CAN SEARCH THE INTERNET? Langchain Agents + OpenAI ChatGPT](https://youtu.be/J-GL0htqda8) by [tylerwhatsgood](https://www.youtube.com/@tylerwhatsgood)
- [Query Your Data with GPT-4 | Embeddings, Vector Databases | Langchain JS Knowledgebase](https://youtu.be/jRnUPUTkZmU) by [StarMorph AI](https://www.youtube.com/@starmorph)
- [`Weaviate` + LangChain for LLM apps presented by Erika Cardenas](https://youtu.be/7AGj4Td5Lgw) by [`Weaviate` • Vector Database](https://www.youtube.com/@Weaviate)
- [Langchain Overview — How to Use Langchain & `ChatGPT`](https://youtu.be/oYVYIq0lOtI) by [Python In Office](https://www.youtube.com/@pythoninoffice6568)
- [Langchain Overview - How to Use Langchain & `ChatGPT`](https://youtu.be/oYVYIq0lOtI) by [Python In Office](https://www.youtube.com/@pythoninoffice6568)
- [LangChain Tutorials](https://www.youtube.com/watch?v=FuqdVNB_8c0&list=PL9V0lbeJ69brU-ojMpU1Y7Ic58Tap0Cw6) by [Edrick](https://www.youtube.com/@edrickdch):
- [LangChain, Chroma DB, OpenAI Beginner Guide | ChatGPT with your PDF](https://youtu.be/FuqdVNB_8c0)
- [LangChain 101: The Complete Beginner's Guide](https://youtu.be/P3MAbZ2eMUI)
- [Custom langchain Agent & Tools with memory. Turn any `Python function` into langchain tool with Gpt 3](https://youtu.be/NIG8lXk0ULg) by [echohive](https://www.youtube.com/@echohive)
- [Building AI LLM Apps with LangChain (and more?) - LIVE STREAM](https://www.youtube.com/live/M-2Cj_2fzWI?feature=share) by [Nicholas Renotte](https://www.youtube.com/@NicholasRenotte)
- [`ChatGPT` with any `YouTube` video using langchain and `chromadb`](https://youtu.be/TQZfB2bzVwU) by [echohive](https://www.youtube.com/@echohive)
- [How to Talk to a `PDF` using LangChain and `ChatGPT`](https://youtu.be/v2i1YDtrIwk) by [Automata Learning Lab](https://www.youtube.com/@automatalearninglab)
- [Langchain Document Loaders Part 1: Unstructured Files](https://youtu.be/O5C0wfsen98) by [Merk](https://www.youtube.com/@merksworld)
- [LangChain - Prompt Templates (what all the best prompt engineers use)](https://youtu.be/1aRu8b0XNOQ) by [Nick Daigler](https://www.youtube.com/@nick_daigs)
- [LangChain. Crear aplicaciones Python impulsadas por GPT](https://youtu.be/DkW_rDndts8) by [Jesús Conde](https://www.youtube.com/@0utKast)
- [Easiest Way to Use GPT In Your Products | LangChain Basics Tutorial](https://youtu.be/fLy0VenZyGc) by [Rachel Woods](https://www.youtube.com/@therachelwoods)
- [`BabyAGI` + `GPT-4` Langchain Agent with Internet Access](https://youtu.be/wx1z_hs5P6E) by [tylerwhatsgood](https://www.youtube.com/@tylerwhatsgood)
- [Learning LLM Agents. How does it actually work? LangChain, AutoGPT & OpenAI](https://youtu.be/mb_YAABSplk) by [Arnoldas Kemeklis](https://www.youtube.com/@processusAI)
- [Get Started with LangChain in `Node.js`](https://youtu.be/Wxx1KUWJFv4) by [Developers Digest](https://www.youtube.com/@DevelopersDigest)
- [LangChain + `OpenAI` tutorial: Building a Q&A system w/ own text data](https://youtu.be/DYOU_Z0hAwo) by [Samuel Chan](https://www.youtube.com/@SamuelChan)
- [Langchain + `Zapier` Agent](https://youtu.be/yribLAb-pxA) by [Merk](https://www.youtube.com/@merksworld)
- [Connecting the Internet with `ChatGPT` (LLMs) using Langchain And Answers Your Questions](https://youtu.be/9Y0TBC63yZg) by [Kamalraj M M](https://www.youtube.com/@insightbuilder)
- [Build More Powerful LLM Applications for Business's with LangChain (Beginners Guide)](https://youtu.be/sp3-WLKEcBg) by [No Code Blackbox](https://www.youtube.com/@nocodeblackbox)
- [LangFlow LLM Agent Demo for 🦜🔗LangChain](https://youtu.be/zJxDHaWt-6o) by [Cobus Greyling](https://www.youtube.com/@CobusGreylingZA)
- [Chatbot Factory: Streamline Python Chatbot Creation with LLMs and Langchain](https://youtu.be/eYer3uzrcuM) by [Finxter](https://www.youtube.com/@CobusGreylingZA)
- [LangChain Tutorial - ChatGPT mit eigenen Daten](https://youtu.be/0XDLyY90E2c) by [Coding Crashkurse](https://www.youtube.com/@codingcrashkurse6429)
- [Chat with a `CSV` | LangChain Agents Tutorial (Beginners)](https://youtu.be/tjeti5vXWOU) by [GoDataProf](https://www.youtube.com/@godataprof)
- [Introdução ao Langchain - #Cortes - Live DataHackers](https://youtu.be/fw8y5VRei5Y) by [Prof. João Gabriel Lima](https://www.youtube.com/@profjoaogabriellima)
- [LangChain: Level up `ChatGPT` !? | LangChain Tutorial Part 1](https://youtu.be/vxUGx8aZpDE) by [Code Affinity](https://www.youtube.com/@codeaffinitydev)
- [KI schreibt krasses Youtube Skript 😲😳 | LangChain Tutorial Deutsch](https://youtu.be/QpTiXyK1jus) by [SimpleKI](https://www.youtube.com/@simpleki)
- [Chat with Audio: Langchain, `Chroma DB`, OpenAI, and `Assembly AI`](https://youtu.be/Kjy7cx1r75g) by [AI Anytime](https://www.youtube.com/@AIAnytime)
- [QA over documents with Auto vector index selection with Langchain router chains](https://youtu.be/9G05qybShv8) by [echohive](https://www.youtube.com/@echohive)
- [Build your own custom LLM application with `Bubble.io` & Langchain (No Code & Beginner friendly)](https://youtu.be/O7NhQGu1m6c) by [No Code Blackbox](https://www.youtube.com/@nocodeblackbox)
- [Simple App to Question Your Docs: Leveraging `Streamlit`, `Hugging Face Spaces`, LangChain, and `Claude`!](https://youtu.be/X4YbNECRr7o) by [Chris Alexiuk](https://www.youtube.com/@chrisalexiuk)
- [LANGCHAIN AI- `ConstitutionalChainAI` + Databutton AI ASSISTANT Web App](https://youtu.be/5zIU6_rdJCU) by [Avra](https://www.youtube.com/@Avra_b)
- [LANGCHAIN AI AUTONOMOUS AGENT WEB APP - 👶 `BABY AGI` 🤖 with EMAIL AUTOMATION using `DATABUTTON`](https://youtu.be/cvAwOGfeHgw) by [Avra](https://www.youtube.com/@Avra_b)
- [The Future of Data Analysis: Using A.I. Models in Data Analysis (LangChain)](https://youtu.be/v_LIcVyg5dk) by [Absent Data](https://www.youtube.com/@absentdata)
- [Memory in LangChain | Deep dive (python)](https://youtu.be/70lqvTFh_Yg) by [Eden Marco](https://www.youtube.com/@EdenMarco)
- [9 LangChain UseCases | Beginner's Guide | 2023](https://youtu.be/zS8_qosHNMw) by [Data Science Basics](https://www.youtube.com/@datasciencebasics)
- [Use Large Language Models in Jupyter Notebook | LangChain | Agents & Indexes](https://youtu.be/JSe11L1a_QQ) by [Abhinaw Tiwari](https://www.youtube.com/@AbhinawTiwariAT)
- [How to Talk to Your Langchain Agent | `11 Labs` + `Whisper`](https://youtu.be/N4k459Zw2PU) by [VRSEN](https://www.youtube.com/@vrsen)
- [LangChain Deep Dive: 5 FUN AI App Ideas To Build Quickly and Easily](https://youtu.be/mPYEPzLkeks) by [James NoCode](https://www.youtube.com/@jamesnocode)
- [LangChain 101: Models](https://youtu.be/T6c_XsyaNSQ) by [Mckay Wrigley](https://www.youtube.com/@realmckaywrigley)
- [LangChain with JavaScript Tutorial #1 | Setup & Using LLMs](https://youtu.be/W3AoeMrg27o) by [Leon van Zyl](https://www.youtube.com/@leonvanzyl)
- [LangChain Overview & Tutorial for Beginners: Build Powerful AI Apps Quickly & Easily (ZERO CODE)](https://youtu.be/iI84yym473Q) by [James NoCode](https://www.youtube.com/@jamesnocode)
- [LangChain In Action: Real-World Use Case With Step-by-Step Tutorial](https://youtu.be/UO699Szp82M) by [Rabbitmetrics](https://www.youtube.com/@rabbitmetrics)
- [Summarizing and Querying Multiple Papers with LangChain](https://youtu.be/p_MQRWH5Y6k) by [Automata Learning Lab](https://www.youtube.com/@automatalearninglab)
- [Using Langchain (and `Replit`) through `Tana`, ask `Google`/`Wikipedia`/`Wolfram Alpha` to fill out a table](https://youtu.be/Webau9lEzoI) by [Stian Håklev](https://www.youtube.com/@StianHaklev)
- [Langchain PDF App (GUI) | Create a ChatGPT For Your `PDF` in Python](https://youtu.be/wUAUdEw5oxM) by [Alejandro AO - Software & Ai](https://www.youtube.com/@alejandro_ao)
- [Auto-GPT with LangChain 🔥 | Create Your Own Personal AI Assistant](https://youtu.be/imDfPmMKEjM) by [Data Science Basics](https://www.youtube.com/@datasciencebasics)
- [Create Your OWN Slack AI Assistant with Python & LangChain](https://youtu.be/3jFXRNn2Bu8) by [Dave Ebbelaar](https://www.youtube.com/@daveebbelaar)
- [How to Create LOCAL Chatbots with GPT4All and LangChain [Full Guide]](https://youtu.be/4p1Fojur8Zw) by [Liam Ottley](https://www.youtube.com/@LiamOttley)
- [Build a `Multilingual PDF` Search App with LangChain, `Cohere` and `Bubble`](https://youtu.be/hOrtuumOrv8) by [Menlo Park Lab](https://www.youtube.com/@menloparklab)
- [Building a LangChain Agent (code-free!) Using `Bubble` and `Flowise`](https://youtu.be/jDJIIVWTZDE) by [Menlo Park Lab](https://www.youtube.com/@menloparklab)
- [Build a LangChain-based Semantic PDF Search App with No-Code Tools Bubble and Flowise](https://youtu.be/s33v5cIeqA4) by [Menlo Park Lab](https://www.youtube.com/@menloparklab)
- [LangChain Memory Tutorial | Building a ChatGPT Clone in Python](https://youtu.be/Cwq91cj2Pnc) by [Alejandro AO - Software & Ai](https://www.youtube.com/@alejandro_ao)
- [ChatGPT For Your DATA | Chat with Multiple Documents Using LangChain](https://youtu.be/TeDgIDqQmzs) by [Data Science Basics](https://www.youtube.com/@datasciencebasics)
- [`Llama Index`: Chat with Documentation using URL Loader](https://youtu.be/XJRoDEctAwA) by [Merk](https://www.youtube.com/@merksworld)
- [Using OpenAI, LangChain, and `Gradio` to Build Custom GenAI Applications](https://youtu.be/1MsmqMg3yUc) by [David Hundley](https://www.youtube.com/@dkhundley)
- [LangChain, Chroma DB, OpenAI Beginner Guide | ChatGPT with your PDF](https://youtu.be/FuqdVNB_8c0)
- [Build AI chatbot with custom knowledge base using OpenAI API and GPT Index](https://youtu.be/vDZAZuaXf48) by [Irina Nik](https://www.youtube.com/@irina_nik)
- [Build Your Own Auto-GPT Apps with LangChain (Python Tutorial)](https://youtu.be/NYSWn1ipbgg) by [Dave Ebbelaar](https://www.youtube.com/@daveebbelaar)
- [Chat with Multiple `PDFs` | LangChain App Tutorial in Python (Free LLMs and Embeddings)](https://youtu.be/dXxQ0LR-3Hg) by [Alejandro AO - Software & Ai](https://www.youtube.com/@alejandro_ao)
- [Chat with a `CSV` | `LangChain Agents` Tutorial (Beginners)](https://youtu.be/tjeti5vXWOU) by [Alejandro AO - Software & Ai](https://www.youtube.com/@alejandro_ao)
- [Create Your Own ChatGPT with `PDF` Data in 5 Minutes (LangChain Tutorial)](https://youtu.be/au2WVVGUvc8) by [Liam Ottley](https://www.youtube.com/@LiamOttley)
- [Build a Custom Chatbot with OpenAI: `GPT-Index` & LangChain | Step-by-Step Tutorial](https://youtu.be/FIDv6nc4CgU) by [Fabrikod](https://www.youtube.com/@fabrikod)
- [`Flowise` is an open-source no-code UI visual tool to build 🦜🔗LangChain applications](https://youtu.be/CovAPtQPU0k) by [Cobus Greyling](https://www.youtube.com/@CobusGreylingZA)
- [LangChain & GPT 4 For Data Analysis: The `Pandas` Dataframe Agent](https://youtu.be/rFQ5Kmkd4jc) by [Rabbitmetrics](https://www.youtube.com/@rabbitmetrics)
- [`GirlfriendGPT` - AI girlfriend with LangChain](https://youtu.be/LiN3D1QZGQw) by [Toolfinder AI](https://www.youtube.com/@toolfinderai)
- [How to build with Langchain 10x easier | ⛓️ LangFlow & `Flowise`](https://youtu.be/Ya1oGL7ZTvU) by [AI Jason](https://www.youtube.com/@AIJasonZ)
- [Getting Started With LangChain In 20 Minutes- Build Celebrity Search Application](https://youtu.be/_FpT1cwcSLg) by [Krish Naik](https://www.youtube.com/@krishnaik06)
- ⛓ [Vector Embeddings Tutorial – Code Your Own AI Assistant with `GPT-4 API` + LangChain + NLP](https://youtu.be/yfHHvmaMkcA?si=5uJhxoh2tvdnOXok) by [FreeCodeCamp.org](https://www.youtube.com/@freecodecamp)
- ⛓ [Fully LOCAL `Llama 2` Q&A with LangChain](https://youtu.be/wgYctKFnQ74?si=UX1F3W-B3MqF4-K-) by [1littlecoder](https://www.youtube.com/@1littlecoder)
- ⛓ [Fully LOCAL `Llama 2` Langchain on CPU](https://youtu.be/yhECvKMu8kM?si=IvjxwlA1c09VwHZ4) by [1littlecoder](https://www.youtube.com/@1littlecoder)
- ⛓ [Build LangChain Audio Apps with Python in 5 Minutes](https://youtu.be/7w7ysaDz2W4?si=BvdMiyHhormr2-vr) by [AssemblyAI](https://www.youtube.com/@AssemblyAI)
- ⛓ [`Voiceflow` & `Flowise`: Want to Beat Competition? New Tutorial with Real AI Chatbot](https://youtu.be/EZKkmeFwag0?si=-4dETYDHEstiK_bb) by [AI SIMP](https://www.youtube.com/@aisimp)
- ⛓ [THIS Is How You Build Production-Ready AI Apps (`LangSmith` Tutorial)](https://youtu.be/tFXm5ijih98?si=lfiqpyaivxHFyI94) by [Dave Ebbelaar](https://www.youtube.com/@daveebbelaar)
- ⛓ [Build POWERFUL LLM Bots EASILY with Your Own Data - `Embedchain` - Langchain 2.0? (Tutorial)](https://youtu.be/jE24Y_GasE8?si=0yEDZt3BK5Q-LIuF) by [WorldofAI](https://www.youtube.com/@intheworldofai)
- ⛓ [`Code Llama` powered Gradio App for Coding: Runs on CPU](https://youtu.be/AJOhV6Ryy5o?si=ouuQT6IghYlc1NEJ) by [AI Anytime](https://www.youtube.com/@AIAnytime)
- ⛓ [LangChain Complete Course in One Video | Develop LangChain (AI) Based Solutions for Your Business](https://youtu.be/j9mQd-MyIg8?si=_wlNT3nP2LpDKztZ) by [UBprogrammer](https://www.youtube.com/@UBprogrammer)
- ⛓ [How to Run `LLaMA` Locally on CPU or GPU | Python & Langchain & CTransformers Guide](https://youtu.be/SvjWDX2NqiM?si=DxFml8XeGhiLTzLV) by [Code With Prince](https://www.youtube.com/@CodeWithPrince)
- ⛓ [PyData Heidelberg #11 - TimeSeries Forecasting & LLM Langchain](https://www.youtube.com/live/Glbwb5Hxu18?si=PIEY8Raq_C9PCHuW) by [PyData](https://www.youtube.com/@PyDataTV)
- ⛓ [Prompt Engineering in Web Development | Using LangChain and Templates with OpenAI](https://youtu.be/pK6WzlTOlYw?si=fkcDQsBG2h-DM8uQ) by [Akamai Developer](https://www.youtube.com/@AkamaiDeveloper)
- ⛓ [Retrieval-Augmented Generation (RAG) using LangChain and `Pinecone` - The RAG Special Episode](https://youtu.be/J_tCD_J6w3s?si=60Mnr5VD9UED9bGG) by [Generative AI and Data Science On AWS](https://www.youtube.com/@GenerativeAIDataScienceOnAWS)
- ⛓ [`LLAMA2 70b-chat` Multiple Documents Chatbot with Langchain & Streamlit |All OPEN SOURCE|Replicate API](https://youtu.be/vhghB81vViM?si=dszzJnArMeac7lyc) by [DataInsightEdge](https://www.youtube.com/@DataInsightEdge01)
- ⛓ [Chatting with 44K Fashion Products: LangChain Opportunities and Pitfalls](https://youtu.be/Zudgske0F_s?si=8HSshHoEhh0PemJA) by [Rabbitmetrics](https://www.youtube.com/@rabbitmetrics)
- ⛓ [Structured Data Extraction from `ChatGPT` with LangChain](https://youtu.be/q1lYg8JISpQ?si=0HctzOHYZvq62sve) by [MG](https://www.youtube.com/@MG_cafe)
- ⛓ [Chat with Multiple PDFs using `Llama 2`, `Pinecone` and LangChain (Free LLMs and Embeddings)](https://youtu.be/TcJ_tVSGS4g?si=FZYnMDJyoFfL3Z2i) by [Muhammad Moin](https://www.youtube.com/@muhammadmoinfaisal)
- ⛓ [Integrate Audio into `LangChain.js` apps in 5 Minutes](https://youtu.be/hNpUSaYZIzs?si=Gb9h7W9A8lzfvFKi) by [AssemblyAI](https://www.youtube.com/@AssemblyAI)
- ⛓ [`ChatGPT` for your data with Local LLM](https://youtu.be/bWrjpwhHEMU?si=uM6ZZ18z9og4M90u) by [Jacob Jedryszek](https://www.youtube.com/@jj09)
- ⛓ [Training `Chatgpt` with your personal data using langchain step by step in detail](https://youtu.be/j3xOMde2v9Y?si=179HsiMU-hEPuSs4) by [NextGen Machines](https://www.youtube.com/@MayankGupta-kb5yc)
- ⛓ [Use ANY language in `LangSmith` with REST](https://youtu.be/7BL0GEdMmgY?si=iXfOEdBLqXF6hqRM) by [Nerding I/O](https://www.youtube.com/@nerding_io)
- ⛓ [How to Leverage the Full Potential of LLMs for Your Business with Langchain - Leon Ruddat](https://youtu.be/vZmoEa7oWMg?si=ZhMmydq7RtkZd56Q) by [PyData](https://www.youtube.com/@PyDataTV)
- ⛓ [`ChatCSV` App: Chat with CSV files using LangChain and `Llama 2`](https://youtu.be/PvsMg6jFs8E?si=Qzg5u5gijxj933Ya) by [Muhammad Moin](https://www.youtube.com/@muhammadmoinfaisal)
- ⛓ [Build Chat PDF app in Python with LangChain, OpenAI, Streamlit | Full project | Learn Coding](https://www.youtube.com/watch?v=WYzFzZg4YZI) by [Jutsupoint](https://www.youtube.com/@JutsuPoint)
- ⛓ [Build Eminem Bot App with LangChain, Streamlit, OpenAI | Full Python Project | Tutorial | AI ChatBot](https://www.youtube.com/watch?v=a2shHB4MRZ4) by [Jutsupoint](https://www.youtube.com/@JutsuPoint)

### [Prompt Engineering and LangChain](https://www.youtube.com/watch?v=muXbPpG_ys4&list=PLEJK-H61Xlwzm5FYLDdKt_6yibO33zoMW) by [Venelin Valkov](https://www.youtube.com/@venelin_valkov)
- [Getting Started with LangChain: Load Custom Data, Run OpenAI Models, Embeddings and `ChatGPT`](https://www.youtube.com/watch?v=muXbPpG_ys4)
- [Loaders, Indexes & Vectorstores in LangChain: Question Answering on `PDF` files with `ChatGPT`](https://www.youtube.com/watch?v=FQnvfR8Dmr0)
- [LangChain Models: `ChatGPT`, `Flan Alpaca`, `OpenAI Embeddings`, Prompt Templates & Streaming](https://www.youtube.com/watch?v=zy6LiK5F5-s)
- [LangChain Chains: Use `ChatGPT` to Build Conversational Agents, Summaries and Q&A on Text With LLMs](https://www.youtube.com/watch?v=h1tJZQPcimM)
- [Analyze Custom CSV Data with `GPT-4` using Langchain](https://www.youtube.com/watch?v=Ew3sGdX8at4)
- [Build ChatGPT Chatbots with LangChain Memory: Understanding and Implementing Memory in Conversations](https://youtu.be/CyuUlf54wTs)

- [Using `ChatGPT` with YOUR OWN Data. This is magical. (LangChain `OpenAI API`)](https://youtu.be/9AXP7tCI9PI)
- [Chat with Multiple `PDFs` | LangChain App Tutorial in Python (Free LLMs and Embeddings)](https://youtu.be/dXxQ0LR-3Hg?si=pjXKhsHRzn10vOqX)
- [`Hugging Face` + Langchain in 5 mins | Access 200k+ FREE AI models for your AI apps](https://youtu.be/_j7JEDWuqLE?si=psimQscN3qo2dOa9)
- [LangChain Crash Course For Beginners | LangChain Tutorial](https://youtu.be/nAmC7SoVLd8?si=qJdvyG5-rnjqfdj1)
- [Vector Embeddings Tutorial – Code Your Own AI Assistant with GPT-4 API + LangChain + NLP](https://youtu.be/yfHHvmaMkcA?si=UBP3yw50cLm3a2nj)
- [Development with Large Language Models Tutorial – `OpenAI`, Langchain, Agents, `Chroma`](https://youtu.be/xZDB1naRUlk?si=v8J1q6oFHRyTkf7Y)
- [Langchain: `PDF` Chat App (GUI) | ChatGPT for Your PDF FILES | Step-by-Step Tutorial](https://youtu.be/RIWbalZ7sTo?si=LbKsCcuyv0BtnrTY)
- [Vector Search `RAG` Tutorial – Combine Your Data with LLMs with Advanced Search](https://youtu.be/JEBDfGqrAUA?si=pD7oxpfwWeJCxfBt)
- [LangChain Crash Course for Beginners](https://youtu.be/lG7Uxts9SXs?si=Yte4S5afN7KNCw0F)
- [Learn `RAG` From Scratch – Python AI Tutorial from a LangChain Engineer](https://youtu.be/sVcwVQRHIc8?si=_LN4g0vOgSdtlB3S)
- [`Llama 2` in LangChain — FIRST Open Source Conversational Agent!](https://youtu.be/6iHVJyX2e50?si=rtq1maPrzWKHbwVV)
- [LangChain Tutorial for Beginners | Generative AI Series](https://youtu.be/cQUUkZnyoD0?si=KYz-bvcocdqGh9f_)
- [Chatbots with `RAG`: LangChain Full Walkthrough](https://youtu.be/LhnCsygAvzY?si=yS7T98VLfcWdkDek)
- [LangChain Explained In 15 Minutes - A MUST Learn For Python Programmers](https://youtu.be/mrjq3lFz23s?si=wkQGcSKUJjuiiEPf)
- [LLM Project | End to End LLM Project Using Langchain, `OpenAI` in Finance Domain](https://youtu.be/MoqgmWV1fm8?si=oVl-5kJVgd3a07Y_)
- [What is LangChain?](https://youtu.be/1bUy-1hGZpI?si=NZ0D51VM5y-DhjGe)
- [`RAG` + Langchain Python Project: Easy AI/Chat For Your Doc](https://youtu.be/tcqEUSNCn8I?si=RLcWPBVLIErRqdmU)
- [Getting Started With LangChain In 20 Minutes- Build Celebrity Search Application](https://youtu.be/_FpT1cwcSLg?si=X9qVazlXYucN_JBP)
- [LangChain GEN AI Tutorial – 6 End-to-End Projects using OpenAI, Google `Gemini Pro`, `LLAMA2`](https://youtu.be/x0AnCE9SE4A?si=_92gJYm7kb-V2bi0)
- [Complete Langchain GEN AI Crash Course With 6 End To End LLM Projects With OPENAI, `LLAMA2`, `Gemini Pro`](https://youtu.be/aWKrL4z5H6w?si=NVLi7Yiq0ccE7xXE)
- [AI Leader Reveals The Future of AI AGENTS (LangChain CEO)](https://youtu.be/9ZhbA0FHZYc?si=1r4P6kRvKVvEhRgE)
- [Learn How To Query Pdf using Langchain Open AI in 5 min](https://youtu.be/5Ghv-F1wF_0?si=ZZRjrWfeiFOVrcvu)
- [Reliable, fully local RAG agents with `LLaMA3`](https://youtu.be/-ROS6gfYIts?si=75CXA8W_BbnkIxcV)
- [Learn `LangChain.js` - Build LLM apps with JavaScript and `OpenAI`](https://youtu.be/HSZ_uaif57o?si=Icj-RAhwMT-vHaYA)
- [LLM Project | End to End LLM Project Using LangChain, Google Palm In Ed-Tech Industry](https://youtu.be/AjQPRomyd-k?si=eC3NT6kn02Lhpz-_)
- [Chatbot Answering from Your Own Knowledge Base: Langchain, `ChatGPT`, `Pinecone`, and `Streamlit`: | Code](https://youtu.be/nAKhxQ3hcMA?si=9Zd_Nd_jiYhtml5w)
- [LangChain is AMAZING | Quick Python Tutorial](https://youtu.be/I4mFqyqFkxg?si=aJ66qh558OfNAczD)
- [`GirlfriendGPT` - AI girlfriend with LangChain](https://youtu.be/LiN3D1QZGQw?si=kZR-lnJwixeVrjmh)
- [Using NEW `MPT-7B` in `Hugging Face` and LangChain](https://youtu.be/DXpk9K7DgMo?si=99JDpV_ueimwJhMi)
- [LangChain - COMPLETE TUTORIAL - Basics to advanced concept!](https://youtu.be/a89vqgK-Qcs?si=0aVO2EOqsw7GE5e3)
- [LangChain Agents: Simply Explained!](https://youtu.be/Xi9Ui-9qcPw?si=DCuG7nGx8dxcfhkx)
- [Chat With Multiple `PDF` Documents With Langchain And Google `Gemini Pro`](https://youtu.be/uus5eLz6smA?si=YUwvHtaZsGeIl0WD)
- [LLM Project | End to end LLM project Using Langchain, `Google Palm` in Retail Industry](https://youtu.be/4wtrl4hnPT8?si=_eOKPpdLfWu5UXMQ)
- [Tutorial | Chat with any Website using Python and Langchain](https://youtu.be/bupx08ZgSFg?si=KRrjYZFnuLsstGwW)
- [Prompt Engineering And LLM's With LangChain In One Shot-Generative AI](https://youtu.be/t2bSApmPzU4?si=87vPQQtYEWTyu2Kx)
- [Build a Custom Chatbot with `OpenAI`: `GPT-Index` & LangChain | Step-by-Step Tutorial](https://youtu.be/FIDv6nc4CgU?si=gR1u3DUG9lvzBIKK)
- [Search Your `PDF` App using Langchain, `ChromaDB`, and Open Source LLM: No OpenAI API (Runs on CPU)](https://youtu.be/rIV1EseKwU4?si=UxZEoXSiPai8fXgl)
- [Building a `RAG` application from scratch using Python, LangChain, and the `OpenAI API`](https://youtu.be/BrsocJb-fAo?si=hvkh9iTGzJ-LnsX-)
- [Function Calling via `ChatGPT API` - First Look With LangChain](https://youtu.be/0-zlUy7VUjg?si=Vc6LFseckEc6qvuk)
- [Private GPT, free deployment! Langchain-Chachat helps you easily play with major mainstream AI models! | Zero Degree Commentary](https://youtu.be/3LLUyaHP-3I?si=AZumEeFXsvqaLl0f)
- [Create a ChatGPT clone using `Streamlit` and LangChain](https://youtu.be/IaTiyQ2oYUQ?si=WbgsYmqPDnMidSUK)
- [What's next for AI agents ft. LangChain's Harrison Chase](https://youtu.be/pBBe1pk8hf4?si=H4vdBF9nmkNZxiHt)
- [`LangFlow`: Build Chatbots without Writing Code - LangChain](https://youtu.be/KJ-ux3hre4s?si=TJuDu4bAlva1myNL)
- [Building a LangChain Custom Medical Agent with Memory](https://youtu.be/6UFtRwWnHws?si=wymYad26VgigRkHy)
- [`Ollama` meets LangChain](https://youtu.be/k_1pOF1mj8k?si=RlBiCrmaR3s7SnMK)
- [End To End LLM Langchain Project using `Pinecone` Vector Database](https://youtu.be/erUfLIi9OFM?si=aHpuHXdIEmAfS4eF)
- [`LLaMA2` with LangChain - Basics | LangChain TUTORIAL](https://youtu.be/cIRzwSXB4Rc?si=FUs0OLVJpzKhut0h)
- [Understanding `ReACT` with LangChain](https://youtu.be/Eug2clsLtFs?si=imgj534ggxlypS0d)

---------------------
[Updated 2024-05-16]
⛓ icon marks a new addition [last update 2024-02-04]

@@ -1,10 +1,27 @@

# langchain-core

## 0.1.x
## 0.1.7 (Jan 5, 2024)

#### Deleted

No deletions.

#### Deprecated

- `BaseChatModel` methods `__call__`, `call_as_llm`, `predict`, `predict_messages`. Will be removed in 0.2.0. Use `BaseChatModel.invoke` instead.
- `BaseChatModel` methods `apredict`, `apredict_messages`. Will be removed in 0.2.0. Use `BaseChatModel.ainvoke` instead.
- `BaseLLM` methods `__call__`, `predict`, `predict_messages`. Will be removed in 0.2.0. Use `BaseLLM.invoke` instead.
- `BaseLLM` methods `apredict`, `apredict_messages`. Will be removed in 0.2.0. Use `BaseLLM.ainvoke` instead.
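
A minimal migration sketch for the deprecations above, moving to the Runnable `invoke`/`ainvoke` interface (the fake model is used purely for illustration; substitute your own LLM or chat model):

```python
from langchain_core.language_models import FakeListLLM

llm = FakeListLLM(responses=["Hello!"])

# Deprecated call styles, removed in 0.2.0:
# text = llm("Say hello")
# text = llm.predict("Say hello")

# Preferred Runnable interface:
text = llm.invoke("Say hello")

# Async variants: `ainvoke` replaces `apredict` / `apredict_messages`.
# text = await llm.ainvoke("Say hello")
```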

#### Fixed

- Restrict recursive URL scraping: [#15559](https://github.com/langchain-ai/langchain/pull/15559)

#### Added

No additions.

#### Beta

- Marked `langchain_core.load.load` and `langchain_core.load.loads` as beta.
- Marked `langchain_core.beta.runnables.context.ContextGet` and `langchain_core.beta.runnables.context.ContextSet` as beta.