Mirror of https://github.com/hwchase17/langchain.git, synced 2026-02-16 18:24:31 +00:00
Compare commits: pydantic/b ... bagatur/rf (112 commits)
Commit SHA1s (the mirror's author and date columns were empty):

40882d2cfb, ac3e87c1a0, 0fa4516ce4, 04f2d69b83, 0fea987dd2, 00eff8c4a7,
6c308aabae, 8bc1a3dca8, 652c542b2f, 7c0b1b8171, d09cdb4880, 3d1095218c,
949b2cf177, 66a47d9a61, 2a3758a98e, dda5b1e370, de1f63505b, 4999e8af7e,
0565d81dc5, 9f08d29bc8, 249752e8ee, 973866c894, b2e6d01e8f, 875ea4b4c6,
c7a5bb6031, 28e1ee4891, a7eba8b006, d11841d760, 05aa02005b, f116e10d53,
bb4f7936f9, a03003f5fd, 85a1c6d0b7, 9930ddc555, 02c5c13a6e, fdbeb52756,
0c8a88b3fa, 08feed3332, a758496236, 103094286e, fd8fe209cb, e51bccdb28,
09a92bb9bf, a214fe8a2d, a956b69720, d87cfd33e8, be9bc62f8b, 0808949e54,
129d056085, 5b3dbf12a5, 9f545825b7, 616e728ef9, 83d2a871eb, 292ae8468e,
b58d492e05, df8e35fd81, 083726ecda, 82f28ca9ef, 82fb56b79c, 99e5eaa9b1,
d4f790fd40, c29fbede59, eee0d1d0dd, ade683c589, 50b8f4dcc7, 2b06792c81,
61e4a06447, ead04487fd, 8976483f3a, edcb03943e, 89a8121eaa, d5eb228874,
463019ac3e, a3dd4dcadf, 9417961b17, 0689628489, 0dd2c21089, 589927e9e1,
5d60ced7b3, ce78877a87, 0c4683ebcc, b11c233304, 8c986221e4, 8f2d321dd0,
019aa04b06, 77b359edf5, 7e63270e04, a69d1b84f4, f2560188ec, c0d67420e5,
c194828be0, c71afb46d1, 995ef8a7fc, de8dfde7f7, e842131425, 6dedd94ba4,
c5e23293f8, 90d7c55343, 3c8e9a9641, 2673b3a314, 4c2de2a7f2, 1c089cadd7,
2e8733cf54, b04e472acf, 090411842e, 84a97d55e1, 09aa1eac03, 0f9f213833,
15f1af8ed6, 8bebc9206f, a3c79b1909, 23928a3311
.github/CONTRIBUTING.md (vendored, 17 changes)

@@ -33,7 +33,7 @@ best way to get our attention.
 ### 🚩GitHub Issues
 
 Our [issues](https://github.com/hwchase17/langchain/issues) page is kept up to date
-with bugs, improvements, and feature requests.
+with bugs, improvements, and feature requests.
 
 There is a taxonomy of labels to help with sorting and discovery of issues of interest. Please use these to help
 organize issues.

@@ -61,11 +61,11 @@ we do not want these to get in the way of getting good code into the codebase.
 
 > **Note:** You can run this repository locally (which is described below) or in a [development container](https://containers.dev/) (which is described in the [.devcontainer folder](https://github.com/hwchase17/langchain/tree/master/.devcontainer)).
 
-This project uses [Poetry](https://python-poetry.org/) as a dependency manager. Check out Poetry's [documentation on how to install it](https://python-poetry.org/docs/#installation) on your system before proceeding.
+This project uses [Poetry](https://python-poetry.org/) v1.5.1 as a dependency manager. Check out Poetry's [documentation on how to install it](https://python-poetry.org/docs/#installation) on your system before proceeding.
 
 ❗Note: If you use `Conda` or `Pyenv` as your environment / package manager, avoid dependency conflicts by doing the following first:
 1. *Before installing Poetry*, create and activate a new Conda env (e.g. `conda create -n langchain python=3.9`)
-2. Install Poetry (see above)
+2. Install Poetry v1.5.1 (see above)
 3. Tell Poetry to use the virtualenv python environment (`poetry config virtualenvs.prefer-active-python true`)
 4. Continue with the following steps.
 

@@ -73,7 +73,7 @@ There are two separate projects in this repository:
 - `langchain`: core langchain code, abstractions, and use cases
 - `langchain.experimental`: more experimental code
 
-Each of these has their OWN development environment.
+Each of these has their OWN development environment.
 In order to run any of the commands below, please move into their respective directories.
 For example, to contribute to `langchain` run `cd libs/langchain` before getting started with the below.
 

@@ -85,7 +85,7 @@ poetry install -E all
 
 This will install all requirements for running the package, examples, linting, formatting, tests, and coverage. Note the `-E all` flag will install all optional dependencies necessary for integration testing.
 
-❗Note: If you're running Poetry 1.4.1 and receive a `WheelFileValidationError` for `debugpy` during installation, you can try either downgrading to Poetry 1.4.0 or disabling "modern installation" (`poetry config installer.modern-installation false`) and re-install requirements. See [this `debugpy` issue](https://github.com/microsoft/debugpy/issues/1246) for more details.
+❗Note: If during installation you receive a `WheelFileValidationError` for `debugpy`, please make sure you are running Poetry v1.5.1. This bug was present in older versions of Poetry (e.g. 1.4.1) and has been resolved in newer releases. If you are still seeing this bug on v1.5.1, you may also try disabling "modern installation" (`poetry config installer.modern-installation false`) and re-installing requirements. See [this `debugpy` issue](https://github.com/microsoft/debugpy/issues/1246) for more details.
 
 Now, you should be able to run the common tasks in the following section. To double check, run `make test`, all tests should pass. If they don't you may need to pip install additional dependencies, such as `numexpr` and `openapi_schema_pydantic`.
 

@@ -175,9 +175,9 @@ If you're adding a new dependency to Langchain, assume that it will be an option
 that most users won't have it installed.
 
 Users that do not have the dependency installed should be able to **import** your code without
-any side effects (no warnings, no errors, no exceptions).
+any side effects (no warnings, no errors, no exceptions).
 
-To introduce the dependency to the pyproject.toml file correctly, please do the following:
+To introduce the dependency to the pyproject.toml file correctly, please do the following:
 
 1. Add the dependency to the main group as an optional dependency
 ```bash

@@ -220,7 +220,7 @@ If you add new logic, please add a unit test.
 
 Integration tests cover logic that requires making calls to outside APIs (often integration with other services).
 
-**warning** Almost no tests should be integration tests.
+**warning** Almost no tests should be integration tests.
 
 Tests that require making network connections make it difficult for other
 developers to test the code.

@@ -307,4 +307,3 @@ even patch releases may contain [non-backwards-compatible changes](https://semve
 
 If your contribution has made its way into a release, we will want to give you credit on Twitter (only if you want though)!
 If you have a Twitter account you would like us to mention, please let us know in the PR or in another manner.
-
.github/ISSUE_TEMPLATE/bug-report.yml (vendored, 2 changes)

@@ -1,5 +1,5 @@
 name: "\U0001F41B Bug Report"
-description: Submit a bug report to help us improve LangChain
+description: Submit a bug report to help us improve LangChain. To report a security issue, please instead use the security option below.
 labels: ["02 Bug Report"]
 body:
   - type: markdown
.github/actions/poetry_setup/action.yml (vendored, 10 changes)

@@ -47,8 +47,12 @@ runs:
         ~/.cache/pip
       key: pip-${{ runner.os }}-${{ runner.arch }}-py-${{ inputs.python-version }}
 
-    - run: pipx install poetry==${{ inputs.poetry-version }} --python python${{ inputs.python-version }}
+    - name: Install poetry
+      shell: bash
+      env:
+        POETRY_VERSION: ${{ inputs.poetry-version }}
+        PYTHON_VERSION: ${{ inputs.python-version }}
+      run: pipx install "poetry==$POETRY_VERSION" --python "python$PYTHON_VERSION" --verbose
 
     - name: Check Poetry File
       shell: bash

@@ -66,12 +70,14 @@ runs:
       id: cache-poetry
       env:
         SEGMENT_DOWNLOAD_TIMEOUT_MIN: "15"
+        WORKDIR: ${{ inputs.working-directory == '' && '.' || inputs.working-directory }}
       with:
         path: |
-          ~/.cache/pypoetry/virtualenvs
-          ~/.cache/pypoetry/cache
-          ~/.cache/pypoetry/artifacts
-        key: poetry-${{ runner.os }}-${{ runner.arch }}-py-${{ inputs.python-version }}-poetry-${{ inputs.poetry-version }}-${{ inputs.cache-key }}-${{ hashFiles('poetry.lock') }}
+          ${{ env.WORKDIR }}/.venv
+        key: poetry-${{ runner.os }}-${{ runner.arch }}-py-${{ inputs.python-version }}-poetry-${{ inputs.poetry-version }}-${{ inputs.cache-key }}-${{ hashFiles(format('{0}/poetry.lock', env.WORKDIR)) }}
 
     - run: ${{ inputs.install-command }}
       working-directory: ${{ inputs.working-directory }}
.github/tools/git-restore-mtime (new executable file, vendored, 606 lines)

@@ -0,0 +1,606 @@
#!/usr/bin/env python3
#
# git-restore-mtime - Change mtime of files based on commit date of last change
#
# Copyright (C) 2012 Rodrigo Silva (MestreLion) <linux@rodrigosilva.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. See <http://www.gnu.org/licenses/gpl.html>
#
# Source: https://github.com/MestreLion/git-tools
# Version: July 13, 2023 (commit hash 5f832e72453e035fccae9d63a5056918d64476a2)
"""
Change the modification time (mtime) of files in work tree, based on the
date of the most recent commit that modified the file, including renames.

Ignores untracked files and uncommitted deletions, additions and renames, and
by default modifications too.
---
Useful prior to generating release tarballs, so each file is archived with a
date that is similar to the date when the file was actually last modified,
assuming the actual modification date and its commit date are close.
"""
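# Example invocation (hypothetical pathspec; the _lint.yml change later in this
# diff runs a similar command in CI):
#
#   .github/tools/git-restore-mtime --no-directories "libs/langchain/**/*.py"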
# TODO:
# - Add -z on git whatchanged/ls-files, so we don't deal with filename decoding
# - When Python is bumped to 3.7, use text instead of universal_newlines on subprocess
# - Update "Statistics for some large projects" with modern hardware and repositories.
# - Create a README.md for git-restore-mtime alone. It deserves extensive documentation
#   - Move Statistics there
# - See git-extras as a good example on project structure and documentation

# FIXME:
# - When current dir is outside the worktree, e.g. using --work-tree, `git ls-files`
#   assume any relative pathspecs are to worktree root, not the current dir. As such,
#   relative pathspecs may not work.
# - Renames are tricky:
#   - R100 should not change mtime, but original name is not on filelist. Should
#     track renames until a valid (A, M) mtime found and then set on current name.
#   - Should set mtime for both current and original directories.
#   - Check mode changes with unchanged blobs?
# - Check file (A, D) for the directory mtime is not sufficient:
#   - Renames also change dir mtime, unless rename was on a parent dir
#   - If most recent change of all files in a dir was a Modification (M),
#     dir might not be touched at all.
#   - Dirs containing only subdirectories but no direct files will also
#     not be touched. They're files' [grand]parent dir, but never their dirname().
#   - Some solutions:
#     - After files done, perform some dir processing for missing dirs, finding latest
#       file (A, D, R)
#     - Simple approach: dir mtime is the most recent child (dir or file) mtime
#     - Use a virtual concept of "created at most at" to fill missing info, bubble up
#       to parents and grandparents
#   - When handling [grand]parent dirs, stay inside <pathspec>
# - Better handling of merge commits. `-m` is plain *wrong*. `-c/--cc` is perfect, but
#   painfully slow. First pass without merge commits is not accurate. Maybe add a new
#   `--accurate` mode for `--cc`?

if __name__ != "__main__":
    raise ImportError("{} should not be used as a module.".format(__name__))

import argparse
import datetime
import logging
import os.path
import shlex
import signal
import subprocess
import sys
import time

__version__ = "2022.12+dev"

# Update symlinks only if the platform supports not following them
UPDATE_SYMLINKS = bool(os.utime in getattr(os, 'supports_follow_symlinks', []))

# Call os.path.normpath() only if not in a POSIX platform (Windows)
NORMALIZE_PATHS = (os.path.sep != '/')

# How many files to process in each batch when re-trying merge commits
STEPMISSING = 100

# (Extra) keywords for the os.utime() call performed by touch()
UTIME_KWS = {} if not UPDATE_SYMLINKS else {'follow_symlinks': False}


# Command-line interface ######################################################

def parse_args():
    parser = argparse.ArgumentParser(
        description=__doc__.split('\n---')[0])

    group = parser.add_mutually_exclusive_group()
    group.add_argument('--quiet', '-q', dest='loglevel',
                       action="store_const", const=logging.WARNING, default=logging.INFO,
                       help="Suppress informative messages and summary statistics.")
    group.add_argument('--verbose', '-v', action="count", help="""
        Print additional information for each processed file.
        Specify twice to further increase verbosity.
    """)

    parser.add_argument('--cwd', '-C', metavar="DIRECTORY", help="""
        Run as if %(prog)s was started in directory %(metavar)s.
        This affects how --work-tree, --git-dir and PATHSPEC arguments are handled.
        See 'man 1 git' or 'git --help' for more information.
    """)

    parser.add_argument('--git-dir', dest='gitdir', metavar="GITDIR", help="""
        Path to the git repository, by default auto-discovered by searching
        the current directory and its parents for a .git/ subdirectory.
    """)

    parser.add_argument('--work-tree', dest='workdir', metavar="WORKTREE", help="""
        Path to the work tree root, by default the parent of GITDIR if it's
        automatically discovered, or the current directory if GITDIR is set.
    """)

    parser.add_argument('--force', '-f', default=False, action="store_true", help="""
        Force updating files with uncommitted modifications.
        Untracked files and uncommitted deletions, renames and additions are
        always ignored.
    """)

    parser.add_argument('--merge', '-m', default=False, action="store_true", help="""
        Include merge commits.
        Leads to more recent times and more files per commit, thus with the same
        time, which may or may not be what you want.
        Including merge commits may lead to fewer commits being evaluated as files
        are found sooner, which can improve performance, sometimes substantially.
        But as merge commits are usually huge, processing them may also take longer.
        By default, merge commits are only used for files missing from regular commits.
    """)

    parser.add_argument('--first-parent', default=False, action="store_true", help="""
        Consider only the first parent, the "main branch", when evaluating merge commits.
        Only effective when merge commits are processed, either when --merge is
        used or when finding missing files after the first regular log search.
        See --skip-missing.
    """)

    parser.add_argument('--skip-missing', '-s', dest="missing", default=True,
                        action="store_false", help="""
        Do not try to find missing files.
        If merge commits were not evaluated with --merge and some files were
        not found in regular commits, by default %(prog)s searches for these
        files again in the merge commits.
        This option disables this retry, so files found only in merge commits
        will not have their timestamp updated.
    """)

    parser.add_argument('--no-directories', '-D', dest='dirs', default=True,
                        action="store_false", help="""
        Do not update directory timestamps.
        By default, use the time of its most recently created, renamed or deleted file.
        Note that just modifying a file will NOT update its directory time.
    """)

    parser.add_argument('--test', '-t', default=False, action="store_true",
                        help="Test run: do not actually update any file timestamp.")

    parser.add_argument('--commit-time', '-c', dest='commit_time', default=False,
                        action='store_true', help="Use commit time instead of author time.")

    parser.add_argument('--oldest-time', '-o', dest='reverse_order', default=False,
                        action='store_true', help="""
        Update times based on the oldest, instead of the most recent commit of a file.
        This reverses the order in which the git log is processed to emulate a
        file "creation" date. Note this will be inaccurate for files deleted and
        re-created at later dates.
    """)

    parser.add_argument('--skip-older-than', metavar='SECONDS', type=int, help="""
        Ignore files that are currently older than %(metavar)s.
        Useful in workflows that assume such files already have a correct timestamp,
        as it may improve performance by processing fewer files.
    """)

    parser.add_argument('--skip-older-than-commit', '-N', default=False,
                        action='store_true', help="""
        Ignore files older than the timestamp it would be updated to.
        Such files may be considered "original", likely in the author's repository.
    """)

    parser.add_argument('--unique-times', default=False, action="store_true", help="""
        Set the microseconds to a unique value per commit.
        Allows telling apart changes that would otherwise have identical timestamps,
        as git's time accuracy is in seconds.
    """)

    parser.add_argument('pathspec', nargs='*', metavar='PATHSPEC', help="""
        Only modify paths matching %(metavar)s, relative to current directory.
        By default, update all but untracked files and submodules.
    """)

    parser.add_argument('--version', '-V', action='version',
                        version='%(prog)s version {version}'.format(version=get_version()))

    args_ = parser.parse_args()
    if args_.verbose:
        args_.loglevel = max(logging.TRACE, logging.DEBUG // args_.verbose)
    args_.debug = args_.loglevel <= logging.DEBUG
    return args_


def get_version(version=__version__):
    if not version.endswith('+dev'):
        return version
    try:
        cwd = os.path.dirname(os.path.realpath(__file__))
        return Git(cwd=cwd, errors=False).describe().lstrip('v')
    except Git.Error:
        return '-'.join((version, "unknown"))


# Helper functions ############################################################

def setup_logging():
    """Add TRACE logging level and corresponding method, return the root logger"""
    logging.TRACE = TRACE = logging.DEBUG // 2
    logging.Logger.trace = lambda _, m, *a, **k: _.log(TRACE, m, *a, **k)
    return logging.getLogger()


def normalize(path):
    r"""Normalize paths from git, handling non-ASCII characters.

    Git stores paths as UTF-8 normalization form C.
    If path contains non-ASCII or non-printable characters, git outputs the UTF-8
    in octal-escaped notation, escaping double-quotes and backslashes, and then
    double-quoting the whole path.
    https://git-scm.com/docs/git-config#Documentation/git-config.txt-corequotePath

    This function reverts this encoding, so:
    normalize(r'"Back\\slash_double\"quote_a\303\247a\303\255"') =>
    r'Back\slash_double"quote_açaí')

    Paths with invalid UTF-8 encoding, such as single 0x80-0xFF bytes (e.g, from
    Latin1/Windows-1251 encoding) are decoded using surrogate escape, the same
    method used by Python for filesystem paths. So 0xE6 ("æ" in Latin1, r'\\346'
    from Git) is decoded as "\udce6". See https://peps.python.org/pep-0383/ and
    https://vstinner.github.io/painful-history-python-filesystem-encoding.html

    Also see notes on `windows/non-ascii-paths.txt` about path encodings on
    non-UTF-8 platforms and filesystems.
    """
    if path and path[0] == '"':
        # Python 2: path = path[1:-1].decode("string-escape")
        # Python 3: https://stackoverflow.com/a/46650050/624066
        path = (path[1:-1]                    # Remove enclosing double quotes
                .encode('latin1')             # Convert to bytes, required by 'unicode-escape'
                .decode('unicode-escape')     # Perform the actual octal-escaping decode
                .encode('latin1')             # 1:1 mapping to bytes, UTF-8 encoded
                .decode('utf8', 'surrogateescape'))  # Decode from UTF-8
    if NORMALIZE_PATHS:
        # Make sure the slash matches the OS; for Windows we need a backslash
        path = os.path.normpath(path)
    return path


def dummy(*_args, **_kwargs):
    """No-op function used in dry-run tests"""


def touch(path, mtime):
    """The actual mtime update"""
    os.utime(path, (mtime, mtime), **UTIME_KWS)


def touch_ns(path, mtime_ns):
    """The actual mtime update, using nanoseconds for unique timestamps"""
    os.utime(path, None, ns=(mtime_ns, mtime_ns), **UTIME_KWS)


def isodate(secs: int):
    # time.localtime() accepts floats, but discards fractional part
    return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(secs))


def isodate_ns(ns: int):
    # for integers fromtimestamp() is equivalent and ~16% slower than isodate()
    return datetime.datetime.fromtimestamp(ns / 1000000000).isoformat(sep=' ')


def get_mtime_ns(secs: int, idx: int):
    # Time resolution for filesystems and functions:
    # ext-4 and other POSIX filesystems: 1 nanosecond
    # NTFS (Windows default): 100 nanoseconds
    # datetime.datetime() (due to 64-bit float epoch): 1 microsecond
    us = idx % 1000000  # 10**6
    return 1000 * (1000000 * secs + us)
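# Worked example (illustrative numbers): get_mtime_ns(1690000000, 3) returns
# 1000 * (1000000 * 1690000000 + 3) = 1_690_000_000_000_003_000 ns, i.e. the
# commit time plus 3 microseconds, so changes from different commits within
# the same second remain distinguishable when --unique-times is used.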

def get_mtime_path(path):
    return os.path.getmtime(path)


# Git class and parse_log(), the heart of the script ##########################

class Git:
    def __init__(self, workdir=None, gitdir=None, cwd=None, errors=True):
        self.gitcmd = ['git']
        self.errors = errors
        self._proc = None
        if workdir: self.gitcmd.extend(('--work-tree', workdir))
        if gitdir: self.gitcmd.extend(('--git-dir', gitdir))
        if cwd: self.gitcmd.extend(('-C', cwd))
        self.workdir, self.gitdir = self._get_repo_dirs()

    def ls_files(self, paths: list = None):
        return (normalize(_) for _ in self._run('ls-files --full-name', paths))

    def ls_dirty(self, force=False):
        return (normalize(_[3:].split(' -> ', 1)[-1])
                for _ in self._run('status --porcelain')
                if _[:2] != '??' and (not force or (_[0] in ('R', 'A')
                                                    or _[1] == 'D')))
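    # Note on the `status --porcelain` lines parsed above: each is "XY <path>"
    # (or "XY <old> -> <new>" for renames), where X is the staged status and Y
    # the worktree status; '??' marks untracked files, _[0] in ('R', 'A')
    # catches staged renames/additions, and _[1] == 'D' worktree deletions.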

    def log(self, merge=False, first_parent=False, commit_time=False,
            reverse_order=False, paths: list = None):
        cmd = 'whatchanged --pretty={}'.format('%ct' if commit_time else '%at')
        if merge: cmd += ' -m'
        if first_parent: cmd += ' --first-parent'
        if reverse_order: cmd += ' --reverse'
        return self._run(cmd, paths)

    def describe(self):
        return self._run('describe --tags', check=True)[0]

    def terminate(self):
        if self._proc is None:
            return
        try:
            self._proc.terminate()
        except OSError:
            # Avoid errors on OpenBSD
            pass

    def _get_repo_dirs(self):
        return (os.path.normpath(_) for _ in
                self._run('rev-parse --show-toplevel --absolute-git-dir', check=True))

    def _run(self, cmdstr: str, paths: list = None, output=True, check=False):
        cmdlist = self.gitcmd + shlex.split(cmdstr)
        if paths:
            cmdlist.append('--')
            cmdlist.extend(paths)
        popen_args = dict(universal_newlines=True, encoding='utf8')
        if not self.errors:
            popen_args['stderr'] = subprocess.DEVNULL
        log.trace("Executing: %s", ' '.join(cmdlist))
        if not output:
            return subprocess.call(cmdlist, **popen_args)
        if check:
            try:
                stdout: str = subprocess.check_output(cmdlist, **popen_args)
                return stdout.splitlines()
            except subprocess.CalledProcessError as e:
                raise self.Error(e.returncode, e.cmd, e.output, e.stderr)
        self._proc = subprocess.Popen(cmdlist, stdout=subprocess.PIPE, **popen_args)
        return (_.rstrip() for _ in self._proc.stdout)

    def __del__(self):
        self.terminate()

    class Error(subprocess.CalledProcessError):
        """Error from git executable"""


def parse_log(filelist, dirlist, stats, git, merge=False, filterlist=None):
    mtime = 0
    datestr = isodate(0)
    for line in git.log(
        merge,
        args.first_parent,
        args.commit_time,
        args.reverse_order,
        filterlist
    ):
        stats['loglines'] += 1

        # Blank line between Date and list of files
        if not line:
            continue

        # Date line
        if line[0] != ':':  # Faster than `not line.startswith(':')`
            stats['commits'] += 1
            mtime = int(line)
            if args.unique_times:
                mtime = get_mtime_ns(mtime, stats['commits'])
            if args.debug:
                datestr = isodate(mtime)
            continue

        # File line: three tokens if it describes a renaming, otherwise two
        tokens = line.split('\t')

        # Possible statuses:
        # M: Modified (content changed)
        # A: Added (created)
        # D: Deleted
        # T: Type changed: to/from regular file, symlinks, submodules
        # R099: Renamed (moved), with % of unchanged content. 100 = pure rename
        # Not possible in log: C=Copied, U=Unmerged, X=Unknown, B=pairing Broken
        status = tokens[0].split(' ')[-1]
        file = tokens[-1]

        # Handles non-ASCII chars and OS path separator
        file = normalize(file)

        def do_file():
            if args.skip_older_than_commit and get_mtime_path(file) <= mtime:
                stats['skip'] += 1
                return
            if args.debug:
                log.debug("%d\t%d\t%d\t%s\t%s",
                          stats['loglines'], stats['commits'], stats['files'],
                          datestr, file)
            try:
                touch(os.path.join(git.workdir, file), mtime)
                stats['touches'] += 1
            except Exception as e:
                log.error("ERROR: %s: %s", e, file)
                stats['errors'] += 1

        def do_dir():
            if args.debug:
                log.debug("%d\t%d\t-\t%s\t%s",
                          stats['loglines'], stats['commits'],
                          datestr, "{}/".format(dirname or '.'))
            try:
                touch(os.path.join(git.workdir, dirname), mtime)
                stats['dirtouches'] += 1
            except Exception as e:
                log.error("ERROR: %s: %s", e, dirname)
                stats['direrrors'] += 1

        if file in filelist:
            stats['files'] -= 1
            filelist.remove(file)
            do_file()

        if args.dirs and status in ('A', 'D'):
            dirname = os.path.dirname(file)
            if dirname in dirlist:
                dirlist.remove(dirname)
                do_dir()

        # All files done?
        if not stats['files']:
            git.terminate()
            return


# Main Logic ##################################################################

def main():
    start = time.time()  # yes, Wall time. CPU time is not realistic for users.
    stats = {_: 0 for _ in ('loglines', 'commits', 'touches', 'skip', 'errors',
                            'dirtouches', 'direrrors')}

    logging.basicConfig(level=args.loglevel, format='%(message)s')
    log.trace("Arguments: %s", args)

    # First things first: Where and Who are we?
    if args.cwd:
        log.debug("Changing directory: %s", args.cwd)
        try:
            os.chdir(args.cwd)
        except OSError as e:
            log.critical(e)
            return e.errno
    # Using both os.chdir() and `git -C` is redundant, but might prevent side effects
    # `git -C` alone could be enough if we make sure that:
    # - all paths, including args.pathspec, are processed by git: ls-files, rev-parse
    # - touch() / os.utime() path argument is always prepended with git.workdir
    try:
        git = Git(workdir=args.workdir, gitdir=args.gitdir, cwd=args.cwd)
    except Git.Error as e:
        # Not in a git repository, and git already informed user on stderr. So we just...
        return e.returncode

    # Get the files managed by git and build file list to be processed
    if UPDATE_SYMLINKS and not args.skip_older_than:
        filelist = set(git.ls_files(args.pathspec))
    else:
        filelist = set()
        for path in git.ls_files(args.pathspec):
            fullpath = os.path.join(git.workdir, path)

            # Symlink (to file, to dir or broken - git handles the same way)
            if not UPDATE_SYMLINKS and os.path.islink(fullpath):
                log.warning("WARNING: Skipping symlink, no OS support for updates: %s",
                            path)
                continue

            # skip files which are older than given threshold
            if (args.skip_older_than
                    and start - get_mtime_path(fullpath) > args.skip_older_than):
                continue

            # Always add files relative to worktree root
            filelist.add(path)

    # If --force, silently ignore uncommitted deletions (not in the filesystem)
    # and renames / additions (will not be found in log anyway)
    if args.force:
        filelist -= set(git.ls_dirty(force=True))
    # Otherwise, ignore any dirty files
    else:
        dirty = set(git.ls_dirty())
        if dirty:
            log.warning("WARNING: Modified files in the working directory were ignored."
                        "\nTo include such files, commit your changes or use --force.")
            filelist -= dirty

    # Build dir list to be processed
    dirlist = set(os.path.dirname(_) for _ in filelist) if args.dirs else set()

    stats['totalfiles'] = stats['files'] = len(filelist)
    log.info("{0:,} files to be processed in work dir".format(stats['totalfiles']))

    if not filelist:
        # Nothing to do. Exit silently and without errors, just like git does
        return

    # Process the log until all files are 'touched'
    log.debug("Line #\tLog #\tF.Left\tModification Time\tFile Name")
    parse_log(filelist, dirlist, stats, git, args.merge, args.pathspec)

    # Missing files
    if filelist:
        # Try to find them in merge logs, if not done already
        # (usually HUGE, thus MUCH slower!)
        if args.missing and not args.merge:
            filterlist = list(filelist)
            missing = len(filterlist)
            log.info("{0:,} files not found in log, trying merge commits".format(missing))
            for i in range(0, missing, STEPMISSING):
                parse_log(filelist, dirlist, stats, git,
                          merge=True, filterlist=filterlist[i:i + STEPMISSING])

        # Still missing some?
        for file in filelist:
            log.warning("WARNING: not found in the log: %s", file)

    # Final statistics
    # Suggestion: use git-log --before=mtime to brag about skipped log entries
    def log_info(msg, *a, width=13):
        ifmt = '{:%d,}' % (width,)  # not using 'n' for consistency with ffmt
        ffmt = '{:%d,.2f}' % (width,)
        # %-formatting lacks a thousand separator, must pre-render with .format()
        log.info(msg.replace('%d', ifmt).replace('%f', ffmt).format(*a))

    log_info(
        "Statistics:\n"
        "%f seconds\n"
        "%d log lines processed\n"
        "%d commits evaluated",
        time.time() - start, stats['loglines'], stats['commits'])

    if args.dirs:
        if stats['direrrors']: log_info("%d directory update errors", stats['direrrors'])
        log_info("%d directories updated", stats['dirtouches'])

    if stats['touches'] != stats['totalfiles']:
        log_info("%d files", stats['totalfiles'])
    if stats['skip']: log_info("%d files skipped", stats['skip'])
    if stats['files']: log_info("%d files missing", stats['files'])
    if stats['errors']: log_info("%d file update errors", stats['errors'])

    log_info("%d files updated", stats['touches'])

    if args.test:
        log.info("TEST RUN - No files modified!")


# Keep only essential, global assignments here. Any other logic must be in main()
log = setup_logging()
args = parse_args()

# Set the actual touch() and other functions based on command-line arguments
if args.unique_times:
    touch = touch_ns
    isodate = isodate_ns

# Make sure this is always set last to ensure --test behaves as intended
if args.test:
    touch = dummy

# UI done, it's showtime!
try:
    sys.exit(main())
except KeyboardInterrupt:
    log.info("\nAborting")
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    os.kill(os.getpid(), signal.SIGINT)
.github/workflows/_lint.yml (vendored, 111 changes)

@@ -9,38 +9,133 @@ on:
         description: "From which folder this pipeline executes"
 
 env:
-  POETRY_VERSION: "1.4.2"
+  POETRY_VERSION: "1.5.1"
   WORKDIR: ${{ inputs.working-directory == '' && '.' || inputs.working-directory }}
 
 jobs:
   build:
     defaults:
       run:
         working-directory: ${{ inputs.working-directory }}
     runs-on: ubuntu-latest
     env:
       # This number is set "by eye": we want it to be big enough
       # so that it's bigger than the number of commits in any reasonable PR,
       # and also as small as possible since increasing the number makes
       # the initial `git fetch` slower.
       FETCH_DEPTH: 50
     strategy:
       matrix:
         # Only lint on the min and max supported Python versions.
         # It's extremely unlikely that there's a lint issue on any version in between
         # that doesn't show up on the min or max versions.
         #
         # GitHub rate-limits how many jobs can be running at any one time.
         # Starting new jobs is also relatively slow,
         # so linting on fewer versions makes CI faster.
         python-version:
           - "3.8"
           - "3.9"
           - "3.10"
           - "3.11"
     steps:
       - uses: actions/checkout@v3
         with:
           # Fetch the last FETCH_DEPTH commits, so the mtime-changing script
           # can accurately set the mtimes of files modified in the last FETCH_DEPTH commits.
           fetch-depth: ${{ env.FETCH_DEPTH }}
       - name: Restore workdir file mtimes to last-edited commit date
         id: restore-mtimes
         # This is needed to make black caching work.
         # Black's cache uses file (mtime, size) to check whether a lookup is a cache hit.
         # Without this command, files in the repo would have the current time as the modified time,
         # since the previous action step just created them.
         # This command resets the mtime to the last time the files were modified in git instead,
         # which is a high-quality and stable representation of the last modification date.
         run: |
           # Important considerations:
           # - These commands run at base of the repo, since we never `cd` to the `WORKDIR`.
           # - We only want to alter mtimes for Python files, since that's all black checks.
           # - We don't need to alter mtimes for directories, since black doesn't look at those.
           # - We also only alter mtimes inside the `WORKDIR` since that's all we'll lint.
           # - This should run before `poetry install`, because poetry's venv also contains
           #   Python files, and we don't want to alter their mtimes since they aren't linted.
 
           # Ensure we fail on non-zero exits and on undefined variables.
           # Also print executed commands, for easier debugging.
           set -eux
 
           # Restore the mtimes of Python files in the workdir based on git history.
           .github/tools/git-restore-mtime --no-directories "$WORKDIR/**/*.py"
 
           # Since CI only does a partial fetch (to `FETCH_DEPTH`) for efficiency,
           # the local git repo doesn't have full history. There are probably files
           # that were last modified in a commit *older than* the oldest fetched commit.
           # After `git-restore-mtime`, such files have a mtime set to the oldest fetched commit.
           #
           # As new commits get added, that timestamp will keep moving forward.
           # If left unchanged, this will make `black` think that the files were edited
           # more recently than its cache suggests. Instead, we can set their mtime
           # to a fixed date in the far past that won't change and won't cause cache misses in black.
           #
           # For all workdir Python files modified in or before the oldest few fetched commits,
           # make their mtime be 2000-01-01 00:00:00.
           OLDEST_COMMIT="$(git log --reverse '--pretty=format:%H' | head -1)"
           OLDEST_COMMIT_TIME="$(git show -s '--format=%ai' "$OLDEST_COMMIT")"
           find "$WORKDIR" -name '*.py' -type f -not -newermt "$OLDEST_COMMIT_TIME" -exec touch -c -m -t '200001010000' '{}' '+'
 
           echo "oldest-commit=$OLDEST_COMMIT" >> "$GITHUB_OUTPUT"
       - uses: actions/cache@v3
         id: cache-pip
         name: Cache langchain editable pip install - ${{ matrix.python-version }}
         env:
           SEGMENT_DOWNLOAD_TIMEOUT_MIN: "15"
         with:
           path: |
             ~/.cache/pip
           key: pip-editable-langchain-deps-${{ runner.os }}-${{ runner.arch }}-py-${{ matrix.python-version }}
       - name: Install poetry
         run: |
-          pipx install poetry==$POETRY_VERSION
+          pipx install "poetry==$POETRY_VERSION"
       - name: Set up Python ${{ matrix.python-version }}
         uses: actions/setup-python@v4
         env:
           SEGMENT_DOWNLOAD_TIMEOUT_MIN: "15"
         with:
           python-version: ${{ matrix.python-version }}
           cache: poetry
           cache-dependency-path: |
             ${{ env.WORKDIR }}/**/poetry.lock
       - name: Install dependencies
         working-directory: ${{ inputs.working-directory }}
         run: |
           poetry install
       - name: Install langchain editable
-        if: ${{ inputs.working-directory != 'langchain' }}
         working-directory: ${{ inputs.working-directory }}
+        if: ${{ inputs.working-directory != 'libs/langchain' }}
         run: |
           pip install -e ../langchain
 
       - name: Restore black cache
         uses: actions/cache@v3
         env:
           CACHE_BASE: black-${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', env.WORKDIR)) }}
           SEGMENT_DOWNLOAD_TIMEOUT_MIN: "15"
         with:
           path: |
             ${{ env.WORKDIR }}/.black_cache
           key: ${{ env.CACHE_BASE }}-${{ steps.restore-mtimes.outputs.oldest-commit }}
           restore-keys:
             # If we can't find an exact match for our cache key, accept any with this prefix.
             ${{ env.CACHE_BASE }}-
 
       - name: Get .mypy_cache to speed up mypy
         uses: actions/cache@v3
         env:
           SEGMENT_DOWNLOAD_TIMEOUT_MIN: "15"
         with:
           path: |
             ${{ env.WORKDIR }}/.mypy_cache
           key: mypy-${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', env.WORKDIR)) }}
 
       - name: Analysing the code with our lint
         working-directory: ${{ inputs.working-directory }}
         env:
           BLACK_CACHE_DIR: .black_cache
         run: |
           make lint
.github/workflows/_release.yml (vendored, 27 changes)

@@ -9,21 +9,27 @@ on:
         description: "From which folder this pipeline executes"
 
 env:
-  POETRY_VERSION: "1.4.2"
+  POETRY_VERSION: "1.5.1"
 
 jobs:
   if_release:
-    if: |
-      ${{ github.event.pull_request.merged == true }}
-      && ${{ contains(github.event.pull_request.labels.*.name, 'release') }}
+    # Disallow publishing from branches that aren't `master`.
+    if: github.ref == 'refs/heads/master'
     runs-on: ubuntu-latest
+    permissions:
+      # This permission is used for trusted publishing:
+      # https://blog.pypi.org/posts/2023-04-20-introducing-trusted-publishers/
+      #
+      # Trusted publishing has to also be configured on PyPI for each package:
+      # https://docs.pypi.org/trusted-publishers/adding-a-publisher/
+      id-token: write
     defaults:
       run:
         working-directory: ${{ inputs.working-directory }}
     steps:
       - uses: actions/checkout@v3
       - name: Install poetry
-        run: pipx install poetry==$POETRY_VERSION
+        run: pipx install "poetry==$POETRY_VERSION"
       - name: Set up Python 3.10
         uses: actions/setup-python@v4
         with:

@@ -45,8 +51,9 @@ jobs:
           generateReleaseNotes: true
           tag: v${{ steps.check-version.outputs.version }}
           commit: master
-      - name: Publish to PyPI
-        env:
-          POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
-        run: |
-          poetry publish
+      - name: Publish package distributions to PyPI
+        uses: pypa/gh-action-pypi-publish@release/v1
+        with:
+          packages-dir: ${{ inputs.working-directory }}/dist/
+          verbose: true
+          print-hash: true
.github/workflows/_test.yml (vendored, 14 changes)

@@ -13,7 +13,7 @@ on:
       default: '["core", "extended", "core-pydantic-2"]'
 
 env:
-  POETRY_VERSION: "1.4.2"
+  POETRY_VERSION: "1.5.1"
 
 jobs:
   build:

@@ -37,7 +37,7 @@ jobs:
         with:
           python-version: ${{ matrix.python-version }}
           working-directory: ${{ inputs.working-directory }}
-          poetry-version: "1.4.2"
+          poetry-version: ${{ env.POETRY_VERSION }}
           cache-key: ${{ matrix.test_type }}
           install-command: |
             if [ "${{ matrix.test_type }}" == "core" ]; then

@@ -46,7 +46,11 @@ jobs:
             elif [ "${{ matrix.test_type }}" == "core-pydantic-2" ]; then
               echo "Running core-pydantic-v2 tests, installing dependencies with poetry..."
               poetry install
-              poetry add pydantic@2.1
+
+              # Install via `pip` instead of `poetry add` to avoid changing lockfile,
+              # which would prevent caching from working: the cache would get saved
+              # to a different key than where it gets loaded from.
+              poetry run pip install 'pydantic>=2.1,<3'
             else
               echo "Running extended tests, installing dependencies with poetry..."
               poetry install -E extended_testing

@@ -59,10 +63,10 @@ jobs:
               EXPECTED_VERSION=1
             fi
             echo "Checking pydantic version... Expecting ${EXPECTED_VERSION}"
-
+
             # Determine the major part of pydantic version
             VERSION=$(poetry run python -c "import pydantic; print(pydantic.__version__)" | cut -d. -f1)
-
+
             # Check that the major part of pydantic version is as expected, if not
             # raise an error
             if [[ "$VERSION" -ne $EXPECTED_VERSION ]]; then
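For reference, the major-version probe above can be sketched in plain Python (mirroring the `cut -d. -f1` pipeline; the expected value below is illustrative):

```python
import pydantic

# Keep only the major component of a version string such as "2.1.1" -> 2,
# the same reduction `cut -d. -f1` performs in the workflow above.
major = int(pydantic.__version__.split(".", 1)[0])

expected = 2  # illustrative; the workflow derives this from the test_type
if major != expected:
    raise SystemExit(f"Expected pydantic v{expected}, found v{major}")
```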
.github/workflows/langchain_ci.yml (vendored, 2 changes)

@@ -24,5 +24,5 @@ jobs:
     ./.github/workflows/_test.yml
     with:
       working-directory: libs/langchain
-      test_type: '["core", "extended"]'
+      test_type: '["core", "extended", "core-pydantic-2"]'
     secrets: inherit

libs/experimental Release workflow (file name not shown)

@@ -2,13 +2,6 @@
 name: libs/experimental Release
 
 on:
-  pull_request:
-    types:
-      - closed
-    branches:
-      - master
-    paths:
-      - 'libs/experimental/pyproject.toml'
   workflow_dispatch: # Allows to trigger the workflow manually in GitHub UI
 
 jobs:

@@ -17,4 +10,4 @@ jobs:
     ./.github/workflows/_release.yml
     with:
       working-directory: libs/experimental
-    secrets: inherit
+    secrets: inherit
.github/workflows/langchain_release.yml (vendored, 9 changes)

@@ -2,13 +2,6 @@
 name: libs/langchain Release
 
 on:
-  pull_request:
-    types:
-      - closed
-    branches:
-      - master
-    paths:
-      - 'libs/langchain/pyproject.toml'
   workflow_dispatch: # Allows to trigger the workflow manually in GitHub UI
 
 jobs:

@@ -17,4 +10,4 @@ jobs:
     ./.github/workflows/_release.yml
     with:
       working-directory: libs/langchain
-    secrets: inherit
+    secrets: inherit
.github/workflows/scheduled_test.yml (vendored, 4 changes)

@@ -6,7 +6,7 @@ on:
     - cron: '0 13 * * *'
 
 env:
-  POETRY_VERSION: "1.4.2"
+  POETRY_VERSION: "1.5.1"
 
 jobs:
   build:

@@ -29,7 +29,7 @@ jobs:
         uses: "./.github/actions/poetry_setup"
         with:
           python-version: ${{ matrix.python-version }}
-          poetry-version: "1.4.2"
+          poetry-version: ${{ env.POETRY_VERSION }}
           working-directory: libs/langchain
           install-command: |
             echo "Running scheduled tests, installing dependencies with poetry..."
README.md (14 changes)

@@ -2,18 +2,18 @@
 
 ⚡ Building applications with LLMs through composability ⚡
 
-[](https://github.com/hwchase17/langchain/releases)
-[](https://github.com/hwchase17/langchain/actions/workflows/langchain_ci.yml)
-[](https://github.com/hwchase17/langchain/actions/workflows/langchain_experimental_ci.yml)
+[](https://github.com/langchain-ai/langchain/releases)
+[](https://github.com/langchain-ai/langchain/actions/workflows/langchain_ci.yml)
+[](https://github.com/langchain-ai/langchain/actions/workflows/langchain_experimental_ci.yml)
 [](https://pepy.tech/project/langchain)
 [](https://opensource.org/licenses/MIT)
 [](https://twitter.com/langchainai)
 [](https://discord.gg/6adMQxSpJS)
-[](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/hwchase17/langchain)
-[](https://codespaces.new/hwchase17/langchain)
-[](https://star-history.com/#hwchase17/langchain)
+[](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchain)
+[](https://codespaces.new/langchain-ai/langchain)
+[](https://star-history.com/#langchain-ai/langchain)
 [](https://libraries.io/github/langchain-ai/langchain)
-[](https://github.com/hwchase17/langchain/issues)
+[](https://github.com/langchain-ai/langchain/issues)
 
 Looking for the JS/TS version? Check out [LangChain.js](https://github.com/hwchase17/langchainjs).
SECURITY.md (new file, 6 additions)

@@ -0,0 +1,6 @@
# Security Policy

## Reporting a Vulnerability

Please report security vulnerabilities by email to `security@langchain.dev`.
This email is an alias to a subset of our maintainers, and will ensure the issue is promptly triaged and acted upon as needed.
(file name not shown)

@@ -150,7 +150,8 @@ def _load_package_modules(
 
         relative_module_name = file_path.relative_to(package_path)
 
-        if relative_module_name.name.startswith("_"):
+        # Skip if any module part starts with an underscore
+        if any(part.startswith("_") for part in relative_module_name.parts):
             continue
 
         # Get the full namespace of the module
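A quick sketch of what this one-line fix changes (the module path below is a made-up example):

```python
from pathlib import Path

rel = Path("chains/_private/helpers.py")  # hypothetical relative module path

# Old check: only the final file name was inspected, so private *packages*
# nested earlier in the path slipped through.
print(rel.name.startswith("_"))  # False

# New check: skip when any path component starts with an underscore.
print(any(part.startswith("_") for part in rel.parts))  # True
```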
(file name not shown)

@@ -1,5 +1,6 @@
 -e libs/langchain
 -e libs/experimental
 pydantic<2
 autodoc_pydantic==1.8.0
 myst_parser
 nbsphinx==0.8.9
(file name not shown)

@@ -28,7 +28,7 @@ LangChain is the product of over 5,000+ contributions by 1,500+ contributors, an
 # 🌍 Meetups, Events, and Hackathons
 
 One of our favorite things about working in AI is how much enthusiasm there is for building together. We want to help make that as easy and impactful for you as possible!
-- **Find a meetup, hackathon, or webinar:** you can find the one for you on on our [global events calendar](https://mirror-feeling-d80.notion.site/0bc81da76a184297b86ca8fc782ee9a3?v=0d80342540df465396546976a50cfb3f).
+- **Find a meetup, hackathon, or webinar:** you can find the one for you on our [global events calendar](https://mirror-feeling-d80.notion.site/0bc81da76a184297b86ca8fc782ee9a3?v=0d80342540df465396546976a50cfb3f).
 - **Submit an event to our calendar:** email us at events@langchain.dev with a link to your event page! We can also help you spread the word with our local communities.
 - **Host a meetup:** If you want to bring a group of builders together, we want to help! We can publicize your event on our event calendar/Twitter, share with our local communities in Discord, send swag, or potentially hook you up with a sponsor. Email us at events@langchain.dev to tell us about your event!
 - **Become a meetup sponsor:** we often hear from groups of builders that want to get together, but are blocked or limited on some dimension (space to host, budget for snacks, prizes to distribute, etc.). If you'd like to help, send us an email to events@langchain.dev and we can share more about how it works!
(file name not shown)

@@ -28,7 +28,7 @@ LangChain provides standard, extendable interfaces and external integrations for
 
 #### [Model I/O](/docs/modules/model_io/)
 Interface with language models
-#### [Data connection](/docs/modules/data_connection/)
+#### [Retrieval](/docs/modules/data_connection/)
 Interface with application-specific data
 #### [Chains](/docs/modules/chains/)
 Construct sequences of calls
@@ -2,15 +2,60 @@
|
||||
sidebar_position: 1
|
||||
---
|
||||
|
||||
# Data connection
|
||||
# Retrieval
|
||||
|
||||
Many LLM applications require user-specific data that is not part of the model's training set. LangChain gives you the
|
||||
building blocks to load, transform, store and query your data via:
|
||||
Many LLM applications require user-specific data that is not part of the model's training set.
|
||||
The primary way of accomplishing this is through Retrieval Augmented Generation (RAG).
|
||||
In this process, external data is *retrieved* and then passed to the LLM when doing the *generation* step.
|
||||
|
||||
- [Document loaders](/docs/modules/data_connection/document_loaders/): Load documents from many different sources
|
||||
- [Document transformers](/docs/modules/data_connection/document_transformers/): Split documents, convert documents into Q&A format, drop redundant documents, and more
|
||||
- [Text embedding models](/docs/modules/data_connection/text_embedding/): Take unstructured text and turn it into a list of floating point numbers
|
||||
- [Vector stores](/docs/modules/data_connection/vectorstores/): Store and search over embedded data
|
||||
- [Retrievers](/docs/modules/data_connection/retrievers/): Query your data
|
||||
LangChain provides all the building blocks for RAG applications - from simple to complex.
|
||||
This section of the documentation covers everything related to the *retrieval* step - e.g. the fetching of the data.
|
||||
Although this sounds simple, it can be subtly complex.
|
||||
This encompasses several key modules.
|
||||
|
||||

|
||||
|
||||
**[Document loaders](/docs/modules/data_connection/document_loaders/)**
|
||||
|
||||
Load documents from many different sources.
|
||||
LangChain provides over a 100 different document loaders as well as integrations with other major providers in the space,
|
||||
like AirByte and Unstructured.
|
||||
We provide integrations to load all types of documents (html, PDF, code) from all types of locations (private s3 buckets, public websites).
|
||||
|
||||
**[Document transformers](/docs/modules/data_connection/document_transformers/)**
|
||||
|
||||
A key part of retrieval is fetching only the relevant parts of documents.
|
||||
This involves several transformation steps in order to best prepare the documents for retrieval.
|
||||
One of the primary ones here is splitting (or chunking) a large document into smaller chunks.
|
||||
LangChain provides several different algorithms for doing this, as well as logic optimized for specific document types (code, markdown, etc).
|
||||
|
||||
**[Text embedding models](/docs/modules/data_connection/text_embedding/)**
|
||||
|
||||
Another key part of retrieval has become creating embeddings for documents.
|
||||
Embeddings capture the semantic meaning of text, allowing you to quickly and
|
||||
efficiently find other pieces of text that are similar.
|
||||
LangChain provides integrations with over 25 different embedding providers and methods,
|
||||
from open-source to proprietary API,
|
||||
allowing you to choose the one best suited for your needs.
|
||||
LangChain exposes a standard interface, allowing you to easily swap between models.
|
||||
|
||||
**[Vector stores](/docs/modules/data_connection/vectorstores/)**
|
||||
|
||||
With the rise of embeddings, there has emerged a need for databases to support efficient storage and searching of these embeddings.
|
||||
LangChain provides integrations with over 50 different vectorstores, from open-source local ones to cloud-hosted proprietary ones,
|
||||
allowing you choose the one best suited for your needs.
|
||||
LangChain exposes a standard interface, allowing you to easily swap between vector stores.

**[Retrievers](/docs/modules/data_connection/retrievers/)**

Once the data is in the database, you still need to retrieve it.
LangChain supports many different retrieval algorithms, and this is one of the places where we add the most value.
We support basic methods that are easy to get started with - namely simple semantic search, sketched below this list.
However, we have also added a collection of algorithms on top of this to increase performance.
These include:

- [Parent Document Retriever](/docs/modules/data_connection/retrievers/parent_document_retriever): This allows you to create multiple embeddings per parent document, allowing you to look up smaller chunks but return larger context.
- [Self Query Retriever](/docs/modules/data_connection/retrievers/self_query): User questions often contain a reference to something that isn't just semantic but rather expresses some logic that can best be represented as a metadata filter. Self-query allows you to parse out the *semantic* part of a query from the other *metadata filters* present in the query.
- [Ensemble Retriever](/docs/modules/data_connection/retrievers/ensemble): Sometimes you may want to retrieve documents from multiple different sources, or using multiple different algorithms. The ensemble retriever allows you to easily do this.
- And more!
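
A minimal sketch of that shared retriever interface: any vector store can be wrapped as a retriever, and chains build on `get_relevant_documents`:

```python
# A minimal sketch: wrap the vector store from the previous step as a retriever.
retriever = db.as_retriever(search_kwargs={"k": 4})
relevant_docs = retriever.get_relevant_documents("What is retrieval?")
```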
@@ -8,7 +8,7 @@ LangChain provides standard, extendable interfaces and external integrations for

 #### [Model I/O](/docs/modules/model_io/)
 Interface with language models
-#### [Data connection](/docs/modules/data_connection/)
+#### [Retrieval](/docs/modules/data_connection/)
 Interface with application-specific data
 #### [Chains](/docs/modules/chains/)
 Construct sequences of calls
BIN docs/docs_skeleton/static/img/OSS_LLM_overview.png (new file, 288 KiB; binary file not shown)
BIN docs/docs_skeleton/static/img/llama-memory-weights.png (new file, 44 KiB; binary file not shown)
BIN docs/docs_skeleton/static/img/llama_t_put.png (new file, 35 KiB; binary file not shown)
@@ -1,15 +1,15 @@

 # Tutorials

-Below are links to video tutorials and courses on LangChain. For written guides on common use cases for LangChain, check out the [use cases guides](/docs/use_cases).
+Below are links to tutorials and courses on LangChain. For written guides on common use cases for LangChain, check out the [use cases guides](/docs/use_cases).

-⛓ icon marks a new addition [last update 2023-07-05]
+⛓ icon marks a new addition [last update 2023-08-20]

 ---------------------

 ### DeepLearning.AI courses
 by [Harrison Chase](https://github.com/hwchase17) and [Andrew Ng](https://en.wikipedia.org/wiki/Andrew_Ng)
 - [LangChain for LLM Application Development](https://learn.deeplearning.ai/langchain)
-- ⛓ [LangChain Chat with Your Data](https://learn.deeplearning.ai/langchain-chat-with-your-data)
+- [LangChain Chat with Your Data](https://learn.deeplearning.ai/langchain-chat-with-your-data)

 ### Handbook
 [LangChain AI Handbook](https://www.pinecone.io/learn/langchain/) By **James Briggs** and **Francisco Ingham**

@@ -36,14 +36,14 @@ Below are links to video tutorials and courses on LangChain. For written guides
 - #8 [Create Custom Tools for Chatbots in LangChain](https://youtu.be/q-HNphrWsDE)
 - #9 [Build Conversational Agents with Vector DBs](https://youtu.be/H6bCqqw9xyI)
 - [Using NEW `MPT-7B` in Hugging Face and LangChain](https://youtu.be/DXpk9K7DgMo)
-- ⛓ [`MPT-30B` Chatbot with LangChain](https://youtu.be/pnem-EhT6VI)
+- [`MPT-30B` Chatbot with LangChain](https://youtu.be/pnem-EhT6VI)


 ### [LangChain 101](https://www.youtube.com/playlist?list=PLqZXAkvF1bPNQER9mLmDbntNfSpzdDIU5) by [Greg Kamradt (Data Indy)](https://www.youtube.com/@DataIndependent)
 - [What Is LangChain? - LangChain + `ChatGPT` Overview](https://youtu.be/_v_fgW2SkkQ)
 - [Quickstart Guide](https://youtu.be/kYRB-vJFy38)
-- [Beginner Guide To 7 Essential Concepts](https://youtu.be/2xxziIWmaSA)
-- [Beginner Guide To 9 Use Cases](https://youtu.be/vGP4pQdCocw)
+- [Beginner's Guide To 7 Essential Concepts](https://youtu.be/2xxziIWmaSA)
+- [Beginner's Guide To 9 Use Cases](https://youtu.be/vGP4pQdCocw)
 - [Agents Overview + Google Searches](https://youtu.be/Jq9Sf68ozk0)
 - [`OpenAI` + `Wolfram Alpha`](https://youtu.be/UijbzCIJ99g)
 - [Ask Questions On Your Custom (or Private) Files](https://youtu.be/EnT-ZTrcPrg)

@@ -63,7 +63,7 @@ Below are links to video tutorials and courses on LangChain. For written guides
 - [Build Your Own `AI Twitter Bot` Using LLMs](https://youtu.be/yLWLDjT01q8)
 - [ChatGPT made my interview questions for me (`Streamlit` + LangChain)](https://youtu.be/zvoAMx0WKkw)
 - [Function Calling via ChatGPT API - First Look With LangChain](https://youtu.be/0-zlUy7VUjg)
-- ⛓ [Extract Topics From Video/Audio With LLMs (Topic Modeling w/ LangChain)](https://youtu.be/pEkxRQFNAs4)
+- [Extract Topics From Video/Audio With LLMs (Topic Modeling w/ LangChain)](https://youtu.be/pEkxRQFNAs4)


 ### [LangChain How to and guides](https://www.youtube.com/playlist?list=PL8motc6AQftk1Bs42EW45kwYbyJ4jOdiZ) by [Sam Witteveen](https://www.youtube.com/@samwitteveenai)

@@ -73,7 +73,7 @@ Below are links to video tutorials and courses on LangChain. For written guides
 - [Conversations with Memory (explanation & code walkthrough)](https://youtu.be/X550Zbz_ROE)
 - [Chat with `Flan20B`](https://youtu.be/VW5LBavIfY4)
 - [Using `Hugging Face Models` locally (code walkthrough)](https://youtu.be/Kn7SX2Mx_Jk)
-- [`PAL` : Program-aided Language Models with LangChain code](https://youtu.be/dy7-LvDu-3s)
+- [`PAL`: Program-aided Language Models with LangChain code](https://youtu.be/dy7-LvDu-3s)
 - [Building a Summarization System with LangChain and `GPT-3` - Part 1](https://youtu.be/LNq_2s_H01Y)
 - [Building a Summarization System with LangChain and `GPT-3` - Part 2](https://youtu.be/d-yeHDLgKHw)
 - [Microsoft's `Visual ChatGPT` using LangChain](https://youtu.be/7YEiEyfPF5U)

@@ -85,7 +85,7 @@ Below are links to video tutorials and courses on LangChain. For written guides
 - [`BabyAGI`: Discover the Power of Task-Driven Autonomous Agents!](https://youtu.be/QBcDLSE2ERA)
 - [Improve your `BabyAGI` with LangChain](https://youtu.be/DRgPyOXZ-oE)
 - [Master `PDF` Chat with LangChain - Your essential guide to queries on documents](https://youtu.be/ZzgUqFtxgXI)
-- [Using LangChain with `DuckDuckGO` `Wikipedia` & `PythonREPL` Tools](https://youtu.be/KerHlb8nuVc)
+- [Using LangChain with `DuckDuckGO`, `Wikipedia` & `PythonREPL` Tools](https://youtu.be/KerHlb8nuVc)
 - [Building Custom Tools and Agents with LangChain (gpt-3.5-turbo)](https://youtu.be/biS8G8x8DdA)
 - [LangChain Retrieval QA Over Multiple Files with `ChromaDB`](https://youtu.be/3yPBVii7Ct0)
 - [LangChain Retrieval QA with Instructor Embeddings & `ChromaDB` for PDFs](https://youtu.be/cFCGUjc33aU)

@@ -99,7 +99,7 @@ Below are links to video tutorials and courses on LangChain. For written guides
 - [`OpenAI Functions` + LangChain : Building a Multi Tool Agent](https://youtu.be/4KXK6c6TVXQ)
 - [What can you do with 16K tokens in LangChain?](https://youtu.be/z2aCZBAtWXs)
 - [Tagging and Extraction - Classification using `OpenAI Functions`](https://youtu.be/a8hMgIcUEnE)
-- ⛓ [HOW to Make Conversational Form with LangChain](https://youtu.be/IT93On2LB5k)
+- [HOW to Make Conversational Form with LangChain](https://youtu.be/IT93On2LB5k)


 ### [LangChain](https://www.youtube.com/playlist?list=PLVEEucA9MYhOu89CX8H3MBZqayTbcCTMr) by [Prompt Engineering](https://www.youtube.com/@engineerprompt)

@@ -107,7 +107,7 @@ Below are links to video tutorials and courses on LangChain. For written guides
 - [Working with MULTIPLE `PDF` Files in LangChain: `ChatGPT` for your Data](https://youtu.be/s5LhRdh5fu4)
 - [`ChatGPT` for YOUR OWN `PDF` files with LangChain](https://youtu.be/TLf90ipMzfE)
 - [Talk to YOUR DATA without OpenAI APIs: LangChain](https://youtu.be/wrD-fZvT6UI)
-- [Langchain: PDF Chat App (GUI) | ChatGPT for Your PDF FILES](https://youtu.be/RIWbalZ7sTo)
+- [LangChain: PDF Chat App (GUI) | ChatGPT for Your PDF FILES](https://youtu.be/RIWbalZ7sTo)
 - [LangFlow: Build Chatbots without Writing Code](https://youtu.be/KJ-ux3hre4s)
 - [LangChain: Giving Memory to LLMs](https://youtu.be/dxO6pzlgJiY)
 - [BEST OPEN Alternative to `OPENAI's EMBEDDINGs` for Retrieval QA: LangChain](https://youtu.be/ogEalPMUCSY)

@@ -121,5 +121,9 @@ Below are links to video tutorials and courses on LangChain. For written guides
 - [LangChain Agents: Build Personal Assistants For Your Data (Q&A with Harrison Chase and Mayo Oshin)](https://youtu.be/gVkF8cwfBLI)

+
+### Codebase Analysis
+- ⛓ [Codebase Analysis: Langchain Agents](https://carbonated-yacht-2c5.notion.site/Codebase-Analysis-Langchain-Agents-0b0587acd50647ca88aaae7cff5df1f2)
+
 ---------------------
-⛓ icon marks a new addition [last update 2023-07-05]
+⛓ icon marks a new addition [last update 2023-08-20]
@@ -1,265 +1,375 @@

 # Dependents

-Dependents stats for `hwchase17/langchain`
+Dependents stats for `langchain-ai/langchain`

-(four "Used by" badge images linking to https://github.com/hwchase17/langchain/network/dependents; visible counts: 244, 9697, 19827)
+(four "Used by" badge images linking to https://github.com/langchain-ai/langchain/network/dependents; visible counts: 355, 19140, 22524)

-[update: 2023-07-07; only dependent repositories with Stars > 100]
+[update: `2023-08-17`; only dependent repositories with Stars > 100]

 | Repository | Stars |
 | :-------- | -----: |
-|[openai/openai-cookbook](https://github.com/openai/openai-cookbook) | 41047 |
-|[LAION-AI/Open-Assistant](https://github.com/LAION-AI/Open-Assistant) | 33983 |
-|[microsoft/TaskMatrix](https://github.com/microsoft/TaskMatrix) | 33375 |
-|[imartinez/privateGPT](https://github.com/imartinez/privateGPT) | 31114 |
-|[hpcaitech/ColossalAI](https://github.com/hpcaitech/ColossalAI) | 30369 |
-|[reworkd/AgentGPT](https://github.com/reworkd/AgentGPT) | 24116 |
-|[OpenBB-finance/OpenBBTerminal](https://github.com/OpenBB-finance/OpenBBTerminal) | 22565 |
-|[openai/chatgpt-retrieval-plugin](https://github.com/openai/chatgpt-retrieval-plugin) | 18375 |
-|[jerryjliu/llama_index](https://github.com/jerryjliu/llama_index) | 17723 |
-|[mindsdb/mindsdb](https://github.com/mindsdb/mindsdb) | 16958 |
-|[mlflow/mlflow](https://github.com/mlflow/mlflow) | 14632 |
-|[GaiZhenbiao/ChuanhuChatGPT](https://github.com/GaiZhenbiao/ChuanhuChatGPT) | 11273 |
-|[openai/evals](https://github.com/openai/evals) | 10745 |
-|[databrickslabs/dolly](https://github.com/databrickslabs/dolly) | 10298 |
-|[imClumsyPanda/langchain-ChatGLM](https://github.com/imClumsyPanda/langchain-ChatGLM) | 9838 |
-|[logspace-ai/langflow](https://github.com/logspace-ai/langflow) | 9247 |
-|[AIGC-Audio/AudioGPT](https://github.com/AIGC-Audio/AudioGPT) | 8768 |
-|[PromtEngineer/localGPT](https://github.com/PromtEngineer/localGPT) | 8651 |
-|[StanGirard/quivr](https://github.com/StanGirard/quivr) | 8119 |
-|[go-skynet/LocalAI](https://github.com/go-skynet/LocalAI) | 7418 |
-|[gventuri/pandas-ai](https://github.com/gventuri/pandas-ai) | 7301 |
-|[PipedreamHQ/pipedream](https://github.com/PipedreamHQ/pipedream) | 6636 |
-|[arc53/DocsGPT](https://github.com/arc53/DocsGPT) | 5849 |
-|[e2b-dev/e2b](https://github.com/e2b-dev/e2b) | 5129 |
-|[langgenius/dify](https://github.com/langgenius/dify) | 4804 |
-|[serge-chat/serge](https://github.com/serge-chat/serge) | 4448 |
-|[csunny/DB-GPT](https://github.com/csunny/DB-GPT) | 4350 |
-|[wenda-LLM/wenda](https://github.com/wenda-LLM/wenda) | 4268 |
-|[zauberzeug/nicegui](https://github.com/zauberzeug/nicegui) | 4244 |
-|[intitni/CopilotForXcode](https://github.com/intitni/CopilotForXcode) | 4232 |
-|[GreyDGL/PentestGPT](https://github.com/GreyDGL/PentestGPT) | 4154 |
-|[madawei2699/myGPTReader](https://github.com/madawei2699/myGPTReader) | 4080 |
-|[zilliztech/GPTCache](https://github.com/zilliztech/GPTCache) | 3949 |
-|[gkamradt/langchain-tutorials](https://github.com/gkamradt/langchain-tutorials) | 3920 |
-|[bentoml/OpenLLM](https://github.com/bentoml/OpenLLM) | 3481 |
-|[MineDojo/Voyager](https://github.com/MineDojo/Voyager) | 3453 |
-|[mmabrouk/chatgpt-wrapper](https://github.com/mmabrouk/chatgpt-wrapper) | 3355 |
-|[postgresml/postgresml](https://github.com/postgresml/postgresml) | 3328 |
-|[marqo-ai/marqo](https://github.com/marqo-ai/marqo) | 3100 |
-|[kyegomez/tree-of-thoughts](https://github.com/kyegomez/tree-of-thoughts) | 3049 |
-|[PrefectHQ/marvin](https://github.com/PrefectHQ/marvin) | 2844 |
-|[project-baize/baize-chatbot](https://github.com/project-baize/baize-chatbot) | 2833 |
-|[h2oai/h2ogpt](https://github.com/h2oai/h2ogpt) | 2809 |
-|[hwchase17/chat-langchain](https://github.com/hwchase17/chat-langchain) | 2809 |
-|[whitead/paper-qa](https://github.com/whitead/paper-qa) | 2664 |
-|[Azure-Samples/azure-search-openai-demo](https://github.com/Azure-Samples/azure-search-openai-demo) | 2650 |
-|[OpenGVLab/InternGPT](https://github.com/OpenGVLab/InternGPT) | 2525 |
-|[GerevAI/gerev](https://github.com/GerevAI/gerev) | 2372 |
-|[ParisNeo/lollms-webui](https://github.com/ParisNeo/lollms-webui) | 2287 |
-|[OpenBMB/BMTools](https://github.com/OpenBMB/BMTools) | 2265 |
-|[SamurAIGPT/privateGPT](https://github.com/SamurAIGPT/privateGPT) | 2084 |
-|[Chainlit/chainlit](https://github.com/Chainlit/chainlit) | 1912 |
-|[Farama-Foundation/PettingZoo](https://github.com/Farama-Foundation/PettingZoo) | 1869 |
-|[OpenGVLab/Ask-Anything](https://github.com/OpenGVLab/Ask-Anything) | 1864 |
-|[IntelligenzaArtificiale/Free-Auto-GPT](https://github.com/IntelligenzaArtificiale/Free-Auto-GPT) | 1849 |
-|[Unstructured-IO/unstructured](https://github.com/Unstructured-IO/unstructured) | 1766 |
-|[yanqiangmiffy/Chinese-LangChain](https://github.com/yanqiangmiffy/Chinese-LangChain) | 1745 |
-|[NVIDIA/NeMo-Guardrails](https://github.com/NVIDIA/NeMo-Guardrails) | 1732 |
-|[hwchase17/notion-qa](https://github.com/hwchase17/notion-qa) | 1716 |
-|[paulpierre/RasaGPT](https://github.com/paulpierre/RasaGPT) | 1619 |
-|[pinterest/querybook](https://github.com/pinterest/querybook) | 1468 |
-|[vocodedev/vocode-python](https://github.com/vocodedev/vocode-python) | 1446 |
-|[thomas-yanxin/LangChain-ChatGLM-Webui](https://github.com/thomas-yanxin/LangChain-ChatGLM-Webui) | 1430 |
-|[Mintplex-Labs/anything-llm](https://github.com/Mintplex-Labs/anything-llm) | 1419 |
-|[Kav-K/GPTDiscord](https://github.com/Kav-K/GPTDiscord) | 1416 |
-|[lunasec-io/lunasec](https://github.com/lunasec-io/lunasec) | 1327 |
-|[psychic-api/psychic](https://github.com/psychic-api/psychic) | 1307 |
-|[jina-ai/thinkgpt](https://github.com/jina-ai/thinkgpt) | 1242 |
-|[agiresearch/OpenAGI](https://github.com/agiresearch/OpenAGI) | 1239 |
-|[ttengwang/Caption-Anything](https://github.com/ttengwang/Caption-Anything) | 1203 |
-|[jina-ai/dev-gpt](https://github.com/jina-ai/dev-gpt) | 1179 |
-|[keephq/keep](https://github.com/keephq/keep) | 1169 |
-|[greshake/llm-security](https://github.com/greshake/llm-security) | 1156 |
-|[richardyc/Chrome-GPT](https://github.com/richardyc/Chrome-GPT) | 1090 |
-|[jina-ai/langchain-serve](https://github.com/jina-ai/langchain-serve) | 1088 |
-|[mmz-001/knowledge_gpt](https://github.com/mmz-001/knowledge_gpt) | 1074 |
-|[juncongmoo/chatllama](https://github.com/juncongmoo/chatllama) | 1057 |
-|[noahshinn024/reflexion](https://github.com/noahshinn024/reflexion) | 1045 |
-|[visual-openllm/visual-openllm](https://github.com/visual-openllm/visual-openllm) | 1036 |
-|[101dotxyz/GPTeam](https://github.com/101dotxyz/GPTeam) | 999 |
-|[poe-platform/api-bot-tutorial](https://github.com/poe-platform/api-bot-tutorial) | 989 |
-|[irgolic/AutoPR](https://github.com/irgolic/AutoPR) | 974 |
-|[homanp/superagent](https://github.com/homanp/superagent) | 970 |
-|[microsoft/X-Decoder](https://github.com/microsoft/X-Decoder) | 941 |
-|[peterw/Chat-with-Github-Repo](https://github.com/peterw/Chat-with-Github-Repo) | 896 |
-|[SamurAIGPT/Camel-AutoGPT](https://github.com/SamurAIGPT/Camel-AutoGPT) | 856 |
-|[cirediatpl/FigmaChain](https://github.com/cirediatpl/FigmaChain) | 840 |
-|[chatarena/chatarena](https://github.com/chatarena/chatarena) | 829 |
-|[rlancemartin/auto-evaluator](https://github.com/rlancemartin/auto-evaluator) | 816 |
-|[seanpixel/Teenage-AGI](https://github.com/seanpixel/Teenage-AGI) | 816 |
-|[hashintel/hash](https://github.com/hashintel/hash) | 806 |
-|[corca-ai/EVAL](https://github.com/corca-ai/EVAL) | 790 |
-|[eyurtsev/kor](https://github.com/eyurtsev/kor) | 752 |
-|[cheshire-cat-ai/core](https://github.com/cheshire-cat-ai/core) | 713 |
-|[e-johnstonn/BriefGPT](https://github.com/e-johnstonn/BriefGPT) | 686 |
-|[run-llama/llama-lab](https://github.com/run-llama/llama-lab) | 685 |
-|[refuel-ai/autolabel](https://github.com/refuel-ai/autolabel) | 673 |
-|[griptape-ai/griptape](https://github.com/griptape-ai/griptape) | 617 |
-|[billxbf/ReWOO](https://github.com/billxbf/ReWOO) | 616 |
-|[Anil-matcha/ChatPDF](https://github.com/Anil-matcha/ChatPDF) | 609 |
-|[NimbleBoxAI/ChainFury](https://github.com/NimbleBoxAI/ChainFury) | 592 |
-|[getmetal/motorhead](https://github.com/getmetal/motorhead) | 581 |
-|[ajndkr/lanarky](https://github.com/ajndkr/lanarky) | 574 |
-|[namuan/dr-doc-search](https://github.com/namuan/dr-doc-search) | 572 |
-|[kreneskyp/ix](https://github.com/kreneskyp/ix) | 564 |
-|[akshata29/chatpdf](https://github.com/akshata29/chatpdf) | 540 |
-|[hwchase17/chat-your-data](https://github.com/hwchase17/chat-your-data) | 540 |
-|[whyiyhw/chatgpt-wechat](https://github.com/whyiyhw/chatgpt-wechat) | 537 |
-|[khoj-ai/khoj](https://github.com/khoj-ai/khoj) | 531 |
-|[SamurAIGPT/ChatGPT-Developer-Plugins](https://github.com/SamurAIGPT/ChatGPT-Developer-Plugins) | 528 |
-|[microsoft/PodcastCopilot](https://github.com/microsoft/PodcastCopilot) | 526 |
-|[ruoccofabrizio/azure-open-ai-embeddings-qna](https://github.com/ruoccofabrizio/azure-open-ai-embeddings-qna) | 515 |
-|[alexanderatallah/window.ai](https://github.com/alexanderatallah/window.ai) | 494 |
-|[StevenGrove/GPT4Tools](https://github.com/StevenGrove/GPT4Tools) | 483 |
-|[jina-ai/agentchain](https://github.com/jina-ai/agentchain) | 472 |
-|[mckaywrigley/repo-chat](https://github.com/mckaywrigley/repo-chat) | 465 |
-|[yeagerai/yeagerai-agent](https://github.com/yeagerai/yeagerai-agent) | 464 |
-|[langchain-ai/langchain-aiplugin](https://github.com/langchain-ai/langchain-aiplugin) | 464 |
-|[mpaepper/content-chatbot](https://github.com/mpaepper/content-chatbot) | 455 |
-|[michaelthwan/searchGPT](https://github.com/michaelthwan/searchGPT) | 455 |
-|[freddyaboulton/gradio-tools](https://github.com/freddyaboulton/gradio-tools) | 450 |
-|[amosjyng/langchain-visualizer](https://github.com/amosjyng/langchain-visualizer) | 446 |
-|[msoedov/langcorn](https://github.com/msoedov/langcorn) | 445 |
-|[plastic-labs/tutor-gpt](https://github.com/plastic-labs/tutor-gpt) | 426 |
-|[poe-platform/poe-protocol](https://github.com/poe-platform/poe-protocol) | 426 |
-|[jonra1993/fastapi-alembic-sqlmodel-async](https://github.com/jonra1993/fastapi-alembic-sqlmodel-async) | 418 |
-|[langchain-ai/auto-evaluator](https://github.com/langchain-ai/auto-evaluator) | 416 |
-|[steamship-core/steamship-langchain](https://github.com/steamship-core/steamship-langchain) | 401 |
-|[xuwenhao/geektime-ai-course](https://github.com/xuwenhao/geektime-ai-course) | 400 |
-|[continuum-llms/chatgpt-memory](https://github.com/continuum-llms/chatgpt-memory) | 386 |
-|[mtenenholtz/chat-twitter](https://github.com/mtenenholtz/chat-twitter) | 382 |
-|[explosion/spacy-llm](https://github.com/explosion/spacy-llm) | 368 |
-|[showlab/VLog](https://github.com/showlab/VLog) | 363 |
-|[yvann-hub/Robby-chatbot](https://github.com/yvann-hub/Robby-chatbot) | 363 |
-|[daodao97/chatdoc](https://github.com/daodao97/chatdoc) | 361 |
-|[opentensor/bittensor](https://github.com/opentensor/bittensor) | 360 |
-|[alejandro-ao/langchain-ask-pdf](https://github.com/alejandro-ao/langchain-ask-pdf) | 355 |
-|[logan-markewich/llama_index_starter_pack](https://github.com/logan-markewich/llama_index_starter_pack) | 351 |
-|[jupyterlab/jupyter-ai](https://github.com/jupyterlab/jupyter-ai) | 348 |
-|[alejandro-ao/ask-multiple-pdfs](https://github.com/alejandro-ao/ask-multiple-pdfs) | 321 |
-|[andylokandy/gpt-4-search](https://github.com/andylokandy/gpt-4-search) | 314 |
-|[mosaicml/examples](https://github.com/mosaicml/examples) | 313 |
-|[personoids/personoids-lite](https://github.com/personoids/personoids-lite) | 306 |
-|[itamargol/openai](https://github.com/itamargol/openai) | 304 |
-|[Anil-matcha/Website-to-Chatbot](https://github.com/Anil-matcha/Website-to-Chatbot) | 299 |
-|[momegas/megabots](https://github.com/momegas/megabots) | 299 |
-|[BlackHC/llm-strategy](https://github.com/BlackHC/llm-strategy) | 289 |
-|[daveebbelaar/langchain-experiments](https://github.com/daveebbelaar/langchain-experiments) | 283 |
-|[wandb/weave](https://github.com/wandb/weave) | 279 |
-|[Cheems-Seminar/grounded-segment-any-parts](https://github.com/Cheems-Seminar/grounded-segment-any-parts) | 273 |
-|[jerlendds/osintbuddy](https://github.com/jerlendds/osintbuddy) | 271 |
-|[OpenBMB/AgentVerse](https://github.com/OpenBMB/AgentVerse) | 270 |
-|[MagnivOrg/prompt-layer-library](https://github.com/MagnivOrg/prompt-layer-library) | 269 |
-|[sullivan-sean/chat-langchainjs](https://github.com/sullivan-sean/chat-langchainjs) | 259 |
-|[Azure-Samples/openai](https://github.com/Azure-Samples/openai) | 252 |
-|[bborn/howdoi.ai](https://github.com/bborn/howdoi.ai) | 248 |
-|[hnawaz007/pythondataanalysis](https://github.com/hnawaz007/pythondataanalysis) | 247 |
-|[conceptofmind/toolformer](https://github.com/conceptofmind/toolformer) | 243 |
-|[truera/trulens](https://github.com/truera/trulens) | 239 |
-|[ur-whitelab/exmol](https://github.com/ur-whitelab/exmol) | 238 |
-|[intel/intel-extension-for-transformers](https://github.com/intel/intel-extension-for-transformers) | 237 |
-|[monarch-initiative/ontogpt](https://github.com/monarch-initiative/ontogpt) | 236 |
-|[wandb/edu](https://github.com/wandb/edu) | 231 |
-|[recalign/RecAlign](https://github.com/recalign/RecAlign) | 229 |
-|[alvarosevilla95/autolang](https://github.com/alvarosevilla95/autolang) | 223 |
-|[kaleido-lab/dolphin](https://github.com/kaleido-lab/dolphin) | 221 |
-|[JohnSnowLabs/nlptest](https://github.com/JohnSnowLabs/nlptest) | 220 |
-|[paolorechia/learn-langchain](https://github.com/paolorechia/learn-langchain) | 219 |
-|[Safiullah-Rahu/CSV-AI](https://github.com/Safiullah-Rahu/CSV-AI) | 215 |
-|[Haste171/langchain-chatbot](https://github.com/Haste171/langchain-chatbot) | 215 |
-|[steamship-packages/langchain-agent-production-starter](https://github.com/steamship-packages/langchain-agent-production-starter) | 214 |
-|[airobotlab/KoChatGPT](https://github.com/airobotlab/KoChatGPT) | 213 |
-|[filip-michalsky/SalesGPT](https://github.com/filip-michalsky/SalesGPT) | 211 |
-|[marella/chatdocs](https://github.com/marella/chatdocs) | 207 |
-|[su77ungr/CASALIOY](https://github.com/su77ungr/CASALIOY) | 200 |
-|[shaman-ai/agent-actors](https://github.com/shaman-ai/agent-actors) | 195 |
-|[plchld/InsightFlow](https://github.com/plchld/InsightFlow) | 189 |
-|[jbrukh/gpt-jargon](https://github.com/jbrukh/gpt-jargon) | 186 |
-|[hwchase17/langchain-streamlit-template](https://github.com/hwchase17/langchain-streamlit-template) | 185 |
-|[huchenxucs/ChatDB](https://github.com/huchenxucs/ChatDB) | 179 |
-|[benthecoder/ClassGPT](https://github.com/benthecoder/ClassGPT) | 178 |
-|[hwchase17/chroma-langchain](https://github.com/hwchase17/chroma-langchain) | 178 |
-|[radi-cho/datasetGPT](https://github.com/radi-cho/datasetGPT) | 177 |
-|[jiran214/GPT-vup](https://github.com/jiran214/GPT-vup) | 176 |
-|[rsaryev/talk-codebase](https://github.com/rsaryev/talk-codebase) | 174 |
-|[edreisMD/plugnplai](https://github.com/edreisMD/plugnplai) | 174 |
-|[gia-guar/JARVIS-ChatGPT](https://github.com/gia-guar/JARVIS-ChatGPT) | 172 |
-|[hardbyte/qabot](https://github.com/hardbyte/qabot) | 171 |
-|[shamspias/customizable-gpt-chatbot](https://github.com/shamspias/customizable-gpt-chatbot) | 165 |
-|[gustavz/DataChad](https://github.com/gustavz/DataChad) | 164 |
-|[yasyf/compress-gpt](https://github.com/yasyf/compress-gpt) | 163 |
-|[SamPink/dev-gpt](https://github.com/SamPink/dev-gpt) | 161 |
-|[yuanjie-ai/ChatLLM](https://github.com/yuanjie-ai/ChatLLM) | 161 |
-|[pablomarin/GPT-Azure-Search-Engine](https://github.com/pablomarin/GPT-Azure-Search-Engine) | 160 |
-|[jondurbin/airoboros](https://github.com/jondurbin/airoboros) | 157 |
-|[fengyuli-dev/multimedia-gpt](https://github.com/fengyuli-dev/multimedia-gpt) | 157 |
-|[PradipNichite/Youtube-Tutorials](https://github.com/PradipNichite/Youtube-Tutorials) | 156 |
-|[nicknochnack/LangchainDocuments](https://github.com/nicknochnack/LangchainDocuments) | 155 |
-|[ethanyanjiali/minChatGPT](https://github.com/ethanyanjiali/minChatGPT) | 155 |
-|[ccurme/yolopandas](https://github.com/ccurme/yolopandas) | 154 |
-|[chakkaradeep/pyCodeAGI](https://github.com/chakkaradeep/pyCodeAGI) | 153 |
-|[preset-io/promptimize](https://github.com/preset-io/promptimize) | 150 |
-|[onlyphantom/llm-python](https://github.com/onlyphantom/llm-python) | 148 |
-|[Azure-Samples/azure-search-power-skills](https://github.com/Azure-Samples/azure-search-power-skills) | 146 |
-|[realminchoi/babyagi-ui](https://github.com/realminchoi/babyagi-ui) | 144 |
-|[microsoft/azure-openai-in-a-day-workshop](https://github.com/microsoft/azure-openai-in-a-day-workshop) | 144 |
-|[jmpaz/promptlib](https://github.com/jmpaz/promptlib) | 143 |
-|[shauryr/S2QA](https://github.com/shauryr/S2QA) | 142 |
-|[handrew/browserpilot](https://github.com/handrew/browserpilot) | 141 |
-|[Jaseci-Labs/jaseci](https://github.com/Jaseci-Labs/jaseci) | 140 |
-|[Klingefjord/chatgpt-telegram](https://github.com/Klingefjord/chatgpt-telegram) | 140 |
-|[WongSaang/chatgpt-ui-server](https://github.com/WongSaang/chatgpt-ui-server) | 139 |
-|[ibiscp/LLM-IMDB](https://github.com/ibiscp/LLM-IMDB) | 139 |
-|[menloparklab/langchain-cohere-qdrant-doc-retrieval](https://github.com/menloparklab/langchain-cohere-qdrant-doc-retrieval) | 138 |
-|[hirokidaichi/wanna](https://github.com/hirokidaichi/wanna) | 137 |
-|[steamship-core/vercel-examples](https://github.com/steamship-core/vercel-examples) | 137 |
-|[deeppavlov/dream](https://github.com/deeppavlov/dream) | 136 |
-|[miaoshouai/miaoshouai-assistant](https://github.com/miaoshouai/miaoshouai-assistant) | 135 |
-|[sugarforever/LangChain-Tutorials](https://github.com/sugarforever/LangChain-Tutorials) | 135 |
-|[yasyf/summ](https://github.com/yasyf/summ) | 135 |
-|[peterw/StoryStorm](https://github.com/peterw/StoryStorm) | 134 |
-|[vaibkumr/prompt-optimizer](https://github.com/vaibkumr/prompt-optimizer) | 132 |
-|[ju-bezdek/langchain-decorators](https://github.com/ju-bezdek/langchain-decorators) | 130 |
-|[homanp/vercel-langchain](https://github.com/homanp/vercel-langchain) | 128 |
-|[Teahouse-Studios/akari-bot](https://github.com/Teahouse-Studios/akari-bot) | 127 |
-|[petehunt/langchain-github-bot](https://github.com/petehunt/langchain-github-bot) | 125 |
-|[eunomia-bpf/GPTtrace](https://github.com/eunomia-bpf/GPTtrace) | 122 |
-|[fixie-ai/fixie-examples](https://github.com/fixie-ai/fixie-examples) | 122 |
-|[Aggregate-Intellect/practical-llms](https://github.com/Aggregate-Intellect/practical-llms) | 120 |
-|[davila7/file-gpt](https://github.com/davila7/file-gpt) | 120 |
-|[Azure-Samples/azure-search-openai-demo-csharp](https://github.com/Azure-Samples/azure-search-openai-demo-csharp) | 119 |
-|[prof-frink-lab/slangchain](https://github.com/prof-frink-lab/slangchain) | 117 |
-|[aurelio-labs/arxiv-bot](https://github.com/aurelio-labs/arxiv-bot) | 117 |
-|[zenml-io/zenml-projects](https://github.com/zenml-io/zenml-projects) | 116 |
-|[flurb18/AgentOoba](https://github.com/flurb18/AgentOoba) | 114 |
-|[kaarthik108/snowChat](https://github.com/kaarthik108/snowChat) | 112 |
-|[RedisVentures/redis-openai-qna](https://github.com/RedisVentures/redis-openai-qna) | 111 |
-|[solana-labs/chatgpt-plugin](https://github.com/solana-labs/chatgpt-plugin) | 111 |
-|[kulltc/chatgpt-sql](https://github.com/kulltc/chatgpt-sql) | 109 |
-|[summarizepaper/summarizepaper](https://github.com/summarizepaper/summarizepaper) | 109 |
-|[Azure-Samples/miyagi](https://github.com/Azure-Samples/miyagi) | 106 |
-|[ssheng/BentoChain](https://github.com/ssheng/BentoChain) | 106 |
-|[voxel51/voxelgpt](https://github.com/voxel51/voxelgpt) | 105 |
-|[mallahyari/drqa](https://github.com/mallahyari/drqa) | 103 |
+|[openai/openai-cookbook](https://github.com/openai/openai-cookbook) | 46276 |
+|[AntonOsika/gpt-engineer](https://github.com/AntonOsika/gpt-engineer) | 41497 |
+|[imartinez/privateGPT](https://github.com/imartinez/privateGPT) | 36296 |
+|[LAION-AI/Open-Assistant](https://github.com/LAION-AI/Open-Assistant) | 34861 |
+|[microsoft/TaskMatrix](https://github.com/microsoft/TaskMatrix) | 33906 |
+|[hpcaitech/ColossalAI](https://github.com/hpcaitech/ColossalAI) | 31654 |
+|[streamlit/streamlit](https://github.com/streamlit/streamlit) | 26571 |
+|[reworkd/AgentGPT](https://github.com/reworkd/AgentGPT) | 25819 |
+|[OpenBB-finance/OpenBBTerminal](https://github.com/OpenBB-finance/OpenBBTerminal) | 23180 |
+|[geekan/MetaGPT](https://github.com/geekan/MetaGPT) | 21968 |
+|[jerryjliu/llama_index](https://github.com/jerryjliu/llama_index) | 20204 |
+|[StanGirard/quivr](https://github.com/StanGirard/quivr) | 20142 |
+|[openai/chatgpt-retrieval-plugin](https://github.com/openai/chatgpt-retrieval-plugin) | 19215 |
+|[mindsdb/mindsdb](https://github.com/mindsdb/mindsdb) | 17580 |
+|[cube-js/cube](https://github.com/cube-js/cube) | 16003 |
+|[PromtEngineer/localGPT](https://github.com/PromtEngineer/localGPT) | 15134 |
+|[mlflow/mlflow](https://github.com/mlflow/mlflow) | 15027 |
+|[chatchat-space/Langchain-Chatchat](https://github.com/chatchat-space/Langchain-Chatchat) | 14024 |
+|[GaiZhenbiao/ChuanhuChatGPT](https://github.com/GaiZhenbiao/ChuanhuChatGPT) | 12020 |
+|[logspace-ai/langflow](https://github.com/logspace-ai/langflow) | 11599 |
+|[openai/evals](https://github.com/openai/evals) | 11509 |
+|[airbytehq/airbyte](https://github.com/airbytehq/airbyte) | 11493 |
+|[databrickslabs/dolly](https://github.com/databrickslabs/dolly) | 10531 |
+|[go-skynet/LocalAI](https://github.com/go-skynet/LocalAI) | 9955 |
+|[AIGC-Audio/AudioGPT](https://github.com/AIGC-Audio/AudioGPT) | 9081 |
+|[gventuri/pandas-ai](https://github.com/gventuri/pandas-ai) | 8201 |
+|[hwchase17/langchainjs](https://github.com/hwchase17/langchainjs) | 7754 |
+|[langgenius/dify](https://github.com/langgenius/dify) | 7348 |
+|[PipedreamHQ/pipedream](https://github.com/PipedreamHQ/pipedream) | 6950 |
+|[h2oai/h2ogpt](https://github.com/h2oai/h2ogpt) | 6858 |
+|[arc53/DocsGPT](https://github.com/arc53/DocsGPT) | 6300 |
+|[0xpayne/gpt-migrate](https://github.com/0xpayne/gpt-migrate) | 6193 |
+|[eosphoros-ai/DB-GPT](https://github.com/eosphoros-ai/DB-GPT) | 6026 |
+|[bentoml/OpenLLM](https://github.com/bentoml/OpenLLM) | 5641 |
+|[jmorganca/ollama](https://github.com/jmorganca/ollama) | 5448 |
+|[e2b-dev/e2b](https://github.com/e2b-dev/e2b) | 5365 |
+|[mage-ai/mage-ai](https://github.com/mage-ai/mage-ai) | 5352 |
+|[wenda-LLM/wenda](https://github.com/wenda-LLM/wenda) | 5192 |
+|[zilliztech/GPTCache](https://github.com/zilliztech/GPTCache) | 4993 |
+|[GreyDGL/PentestGPT](https://github.com/GreyDGL/PentestGPT) | 4831 |
+|[zauberzeug/nicegui](https://github.com/zauberzeug/nicegui) | 4824 |
+|[serge-chat/serge](https://github.com/serge-chat/serge) | 4783 |
+|[Shaunwei/RealChar](https://github.com/Shaunwei/RealChar) | 4779 |
+|[gkamradt/langchain-tutorials](https://github.com/gkamradt/langchain-tutorials) | 4752 |
+|[openchatai/OpenChat](https://github.com/openchatai/OpenChat) | 4452 |
+|[intel-analytics/BigDL](https://github.com/intel-analytics/BigDL) | 4286 |
+|[madawei2699/myGPTReader](https://github.com/madawei2699/myGPTReader) | 4167 |
+|[MineDojo/Voyager](https://github.com/MineDojo/Voyager) | 3952 |
+|[embedchain/embedchain](https://github.com/embedchain/embedchain) | 3887 |
+|[postgresml/postgresml](https://github.com/postgresml/postgresml) | 3636 |
+|[assafelovic/gpt-researcher](https://github.com/assafelovic/gpt-researcher) | 3480 |
+|[llm-workflow-engine/llm-workflow-engine](https://github.com/llm-workflow-engine/llm-workflow-engine) | 3445 |
+|[marqo-ai/marqo](https://github.com/marqo-ai/marqo) | 3397 |
+|[kyegomez/tree-of-thoughts](https://github.com/kyegomez/tree-of-thoughts) | 3366 |
+|[RayVentura/ShortGPT](https://github.com/RayVentura/ShortGPT) | 3335 |
+|[Azure-Samples/azure-search-openai-demo](https://github.com/Azure-Samples/azure-search-openai-demo) | 3316 |
+|[langchain-ai/chat-langchain](https://github.com/langchain-ai/chat-langchain) | 3270 |
+|[khoj-ai/khoj](https://github.com/khoj-ai/khoj) | 3266 |
+|[PrefectHQ/marvin](https://github.com/PrefectHQ/marvin) | 3176 |
+|[project-baize/baize-chatbot](https://github.com/project-baize/baize-chatbot) | 2999 |
+|[whitead/paper-qa](https://github.com/whitead/paper-qa) | 2932 |
+|[OpenGVLab/InternGPT](https://github.com/OpenGVLab/InternGPT) | 2816 |
+|[continuedev/continue](https://github.com/continuedev/continue) | 2803 |
+|[ParisNeo/lollms-webui](https://github.com/ParisNeo/lollms-webui) | 2679 |
+|[OpenBMB/ToolBench](https://github.com/OpenBMB/ToolBench) | 2673 |
+|[shroominic/codeinterpreter-api](https://github.com/shroominic/codeinterpreter-api) | 2492 |
+|[OpenBMB/BMTools](https://github.com/OpenBMB/BMTools) | 2486 |
+|[GerevAI/gerev](https://github.com/GerevAI/gerev) | 2450 |
+|[SamurAIGPT/EmbedAI](https://github.com/SamurAIGPT/EmbedAI) | 2448 |
+|[Unstructured-IO/unstructured](https://github.com/Unstructured-IO/unstructured) | 2255 |
+|[Mintplex-Labs/anything-llm](https://github.com/Mintplex-Labs/anything-llm) | 2216 |
+|[emptycrown/llama-hub](https://github.com/emptycrown/llama-hub) | 2198 |
+|[homanp/superagent](https://github.com/homanp/superagent) | 2177 |
+|[yanqiangmiffy/Chinese-LangChain](https://github.com/yanqiangmiffy/Chinese-LangChain) | 2144 |
+|[OpenGVLab/Ask-Anything](https://github.com/OpenGVLab/Ask-Anything) | 2092 |
+|[IntelligenzaArtificiale/Free-Auto-GPT](https://github.com/IntelligenzaArtificiale/Free-Auto-GPT) | 2060 |
+|[thomas-yanxin/LangChain-ChatGLM-Webui](https://github.com/thomas-yanxin/LangChain-ChatGLM-Webui) | 2039 |
+|[NVIDIA/NeMo-Guardrails](https://github.com/NVIDIA/NeMo-Guardrails) | 1992 |
+|[Farama-Foundation/PettingZoo](https://github.com/Farama-Foundation/PettingZoo) | 1949 |
+|[hwchase17/notion-qa](https://github.com/hwchase17/notion-qa) | 1915 |
+|[paulpierre/RasaGPT](https://github.com/paulpierre/RasaGPT) | 1783 |
+|[jupyterlab/jupyter-ai](https://github.com/jupyterlab/jupyter-ai) | 1761 |
+|[vocodedev/vocode-python](https://github.com/vocodedev/vocode-python) | 1627 |
+|[pinterest/querybook](https://github.com/pinterest/querybook) | 1509 |
+|[psychic-api/psychic](https://github.com/psychic-api/psychic) | 1499 |
+|[Kav-K/GPTDiscord](https://github.com/Kav-K/GPTDiscord) | 1476 |
+|[avinashkranjan/Amazing-Python-Scripts](https://github.com/avinashkranjan/Amazing-Python-Scripts) | 1471 |
+|[hegelai/prompttools](https://github.com/hegelai/prompttools) | 1392 |
+|[jina-ai/langchain-serve](https://github.com/jina-ai/langchain-serve) | 1370 |
+|[Forethought-Technologies/AutoChain](https://github.com/Forethought-Technologies/AutoChain) | 1360 |
+|[keephq/keep](https://github.com/keephq/keep) | 1357 |
+|[ttengwang/Caption-Anything](https://github.com/ttengwang/Caption-Anything) | 1345 |
+|[lunasec-io/lunasec](https://github.com/lunasec-io/lunasec) | 1342 |
+|[agiresearch/OpenAGI](https://github.com/agiresearch/OpenAGI) | 1332 |
+|[noahshinn024/reflexion](https://github.com/noahshinn024/reflexion) | 1314 |
+|[jina-ai/dev-gpt](https://github.com/jina-ai/dev-gpt) | 1314 |
+|[jina-ai/thinkgpt](https://github.com/jina-ai/thinkgpt) | 1313 |
+|[greshake/llm-security](https://github.com/greshake/llm-security) | 1299 |
+|[mmz-001/knowledge_gpt](https://github.com/mmz-001/knowledge_gpt) | 1237 |
+|[101dotxyz/GPTeam](https://github.com/101dotxyz/GPTeam) | 1232 |
+|[richardyc/Chrome-GPT](https://github.com/richardyc/Chrome-GPT) | 1223 |
+|[eyurtsev/kor](https://github.com/eyurtsev/kor) | 1192 |
+|[pluralsh/plural](https://github.com/pluralsh/plural) | 1126 |
+|[juncongmoo/chatllama](https://github.com/juncongmoo/chatllama) | 1117 |
+|[visual-openllm/visual-openllm](https://github.com/visual-openllm/visual-openllm) | 1110 |
+|[poe-platform/api-bot-tutorial](https://github.com/poe-platform/api-bot-tutorial) | 1096 |
+|[refuel-ai/autolabel](https://github.com/refuel-ai/autolabel) | 1080 |
+|[microsoft/X-Decoder](https://github.com/microsoft/X-Decoder) | 1075 |
+|[irgolic/AutoPR](https://github.com/irgolic/AutoPR) | 1068 |
+|[SamurAIGPT/Camel-AutoGPT](https://github.com/SamurAIGPT/Camel-AutoGPT) | 984 |
+|[peterw/Chat-with-Github-Repo](https://github.com/peterw/Chat-with-Github-Repo) | 957 |
+|[chatarena/chatarena](https://github.com/chatarena/chatarena) | 955 |
+|[griptape-ai/griptape](https://github.com/griptape-ai/griptape) | 944 |
+|[psychic-api/rag-stack](https://github.com/psychic-api/rag-stack) | 942 |
+|[nod-ai/SHARK](https://github.com/nod-ai/SHARK) | 909 |
+|[filip-michalsky/SalesGPT](https://github.com/filip-michalsky/SalesGPT) | 899 |
+|[melih-unsal/DemoGPT](https://github.com/melih-unsal/DemoGPT) | 896 |
+|[rlancemartin/auto-evaluator](https://github.com/rlancemartin/auto-evaluator) | 889 |
+|[cirediatpl/FigmaChain](https://github.com/cirediatpl/FigmaChain) | 868 |
+|[seanpixel/Teenage-AGI](https://github.com/seanpixel/Teenage-AGI) | 854 |
+|[cheshire-cat-ai/core](https://github.com/cheshire-cat-ai/core) | 847 |
+|[run-llama/llama-lab](https://github.com/run-llama/llama-lab) | 836 |
+|[corca-ai/EVAL](https://github.com/corca-ai/EVAL) | 818 |
+|[Anil-matcha/ChatPDF](https://github.com/Anil-matcha/ChatPDF) | 798 |
+|[alejandro-ao/ask-multiple-pdfs](https://github.com/alejandro-ao/ask-multiple-pdfs) | 782 |
+|[hwchase17/chat-your-data](https://github.com/hwchase17/chat-your-data) | 748 |
+|[LambdaLabsML/examples](https://github.com/LambdaLabsML/examples) | 741 |
+|[ajndkr/lanarky](https://github.com/ajndkr/lanarky) | 732 |
+|[microsoft/Llama-2-Onnx](https://github.com/microsoft/Llama-2-Onnx) | 722 |
+|[e-johnstonn/BriefGPT](https://github.com/e-johnstonn/BriefGPT) | 710 |
+|[billxbf/ReWOO](https://github.com/billxbf/ReWOO) | 710 |
+|[kennethleungty/Llama-2-Open-Source-LLM-CPU-Inference](https://github.com/kennethleungty/Llama-2-Open-Source-LLM-CPU-Inference) | 707 |
+|[databrickslabs/pyspark-ai](https://github.com/databrickslabs/pyspark-ai) | 704 |
+|[OpenBMB/AgentVerse](https://github.com/OpenBMB/AgentVerse) | 704 |
+|[kreneskyp/ix](https://github.com/kreneskyp/ix) | 692 |
+|[akshata29/entaoai](https://github.com/akshata29/entaoai) | 682 |
+|[promptfoo/promptfoo](https://github.com/promptfoo/promptfoo) | 670 |
+|[getmetal/motorhead](https://github.com/getmetal/motorhead) | 662 |
+|[ruoccofabrizio/azure-open-ai-embeddings-qna](https://github.com/ruoccofabrizio/azure-open-ai-embeddings-qna) | 650 |
+|[YiVal/YiVal](https://github.com/YiVal/YiVal) | 632 |
+|[whyiyhw/chatgpt-wechat](https://github.com/whyiyhw/chatgpt-wechat) | 624 |
+|[SamurAIGPT/ChatGPT-Developer-Plugins](https://github.com/SamurAIGPT/ChatGPT-Developer-Plugins) | 617 |
+|[dot-agent/openagent](https://github.com/dot-agent/openagent) | 602 |
+|[msoedov/langcorn](https://github.com/msoedov/langcorn) | 588 |
+|[namuan/dr-doc-search](https://github.com/namuan/dr-doc-search) | 585 |
+|[microsoft/PodcastCopilot](https://github.com/microsoft/PodcastCopilot) | 581 |
+|[alexanderatallah/window.ai](https://github.com/alexanderatallah/window.ai) | 569 |
+|[StevenGrove/GPT4Tools](https://github.com/StevenGrove/GPT4Tools) | 568 |
+|[xusenlinzy/api-for-open-llm](https://github.com/xusenlinzy/api-for-open-llm) | 559 |
+|[NoDataFound/hackGPT](https://github.com/NoDataFound/hackGPT) | 558 |
+|[langchain-ai/auto-evaluator](https://github.com/langchain-ai/auto-evaluator) | 554 |
+|[yeagerai/yeagerai-agent](https://github.com/yeagerai/yeagerai-agent) | 537 |
+|[FlagOpen/FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding) | 534 |
+|[amosjyng/langchain-visualizer](https://github.com/amosjyng/langchain-visualizer) | 534 |
+|[OpenGenerativeAI/GenossGPT](https://github.com/OpenGenerativeAI/GenossGPT) | 524 |
+|[jina-ai/agentchain](https://github.com/jina-ai/agentchain) | 496 |
+|[mckaywrigley/repo-chat](https://github.com/mckaywrigley/repo-chat) | 495 |
+|[michaelthwan/searchGPT](https://github.com/michaelthwan/searchGPT) | 494 |
+|[explosion/spacy-llm](https://github.com/explosion/spacy-llm) | 492 |
+|[plastic-labs/tutor-gpt](https://github.com/plastic-labs/tutor-gpt) | 490 |
+|[freddyaboulton/gradio-tools](https://github.com/freddyaboulton/gradio-tools) | 488 |
+|[xuwenhao/geektime-ai-course](https://github.com/xuwenhao/geektime-ai-course) | 481 |
+|[tgscan-dev/tgscan](https://github.com/tgscan-dev/tgscan) | 480 |
+|[langchain-ai/langchain-aiplugin](https://github.com/langchain-ai/langchain-aiplugin) | 480 |
+|[mpaepper/content-chatbot](https://github.com/mpaepper/content-chatbot) | 473 |
+|[yvann-hub/Robby-chatbot](https://github.com/yvann-hub/Robby-chatbot) | 471 |
+|[steamship-core/steamship-langchain](https://github.com/steamship-core/steamship-langchain) | 467 |
+|[langchain-ai/streamlit-agent](https://github.com/langchain-ai/streamlit-agent) | 463 |
+|[jonra1993/fastapi-alembic-sqlmodel-async](https://github.com/jonra1993/fastapi-alembic-sqlmodel-async) | 463 |
+|[continuum-llms/chatgpt-memory](https://github.com/continuum-llms/chatgpt-memory) | 463 |
+|[poe-platform/poe-protocol](https://github.com/poe-platform/poe-protocol) | 441 |
+|[alejandro-ao/langchain-ask-pdf](https://github.com/alejandro-ao/langchain-ask-pdf) | 437 |
+|[Dicklesworthstone/llama_embeddings_fastapi_service](https://github.com/Dicklesworthstone/llama_embeddings_fastapi_service) | 432 |
+|[DataDog/dd-trace-py](https://github.com/DataDog/dd-trace-py) | 431 |
+|[daveebbelaar/langchain-experiments](https://github.com/daveebbelaar/langchain-experiments) | 431 |
+|[jiran214/GPT-vup](https://github.com/jiran214/GPT-vup) | 428 |
+|[Azure-Samples/openai](https://github.com/Azure-Samples/openai) | 419 |
+|[NimbleBoxAI/ChainFury](https://github.com/NimbleBoxAI/ChainFury) | 414 |
+|[CarperAI/OpenELM](https://github.com/CarperAI/OpenELM) | 411 |
+|[daodao97/chatdoc](https://github.com/daodao97/chatdoc) | 404 |
+|[MiuLab/Taiwan-LLaMa](https://github.com/MiuLab/Taiwan-LLaMa) | 402 |
+|[logan-markewich/llama_index_starter_pack](https://github.com/logan-markewich/llama_index_starter_pack) | 399 |
+|[mtenenholtz/chat-twitter](https://github.com/mtenenholtz/chat-twitter) | 394 |
+|[opentensor/bittensor](https://github.com/opentensor/bittensor) | 393 |
+|[showlab/VLog](https://github.com/showlab/VLog) | 392 |
+|[microsoft/sample-app-aoai-chatGPT](https://github.com/microsoft/sample-app-aoai-chatGPT) | 391 |
+|[truera/trulens](https://github.com/truera/trulens) | 390 |
+|[Anil-matcha/Chatbase](https://github.com/Anil-matcha/Chatbase) | 363 |
+|[marella/chatdocs](https://github.com/marella/chatdocs) | 360 |
+|[jondurbin/airoboros](https://github.com/jondurbin/airoboros) | 357 |
+|[mosaicml/examples](https://github.com/mosaicml/examples) | 353 |
+|[wandb/weave](https://github.com/wandb/weave) | 352 |
+|[huchenxucs/ChatDB](https://github.com/huchenxucs/ChatDB) | 350 |
+|[rsaryev/talk-codebase](https://github.com/rsaryev/talk-codebase) | 343 |
+|[steamship-packages/langchain-production-starter](https://github.com/steamship-packages/langchain-production-starter) | 335 |
+|[jerlendds/osintbuddy](https://github.com/jerlendds/osintbuddy) | 335 |
+|[andylokandy/gpt-4-search](https://github.com/andylokandy/gpt-4-search) | 329 |
+|[MagnivOrg/prompt-layer-library](https://github.com/MagnivOrg/prompt-layer-library) | 325 |
+|[personoids/personoids-lite](https://github.com/personoids/personoids-lite) | 319 |
+|[momegas/megabots](https://github.com/momegas/megabots) | 317 |
+|[itamargol/openai](https://github.com/itamargol/openai) | 312 |
+|[intel/intel-extension-for-transformers](https://github.com/intel/intel-extension-for-transformers) | 310 |
+|[monarch-initiative/ontogpt](https://github.com/monarch-initiative/ontogpt) | 310 |
+|[BlackHC/llm-strategy](https://github.com/BlackHC/llm-strategy) | 308 |
+|[Nuggt-dev/Nuggt](https://github.com/Nuggt-dev/Nuggt) | 305 |
+|[cofactoryai/textbase](https://github.com/cofactoryai/textbase) | 304 |
+|[Cheems-Seminar/grounded-segment-any-parts](https://github.com/Cheems-Seminar/grounded-segment-any-parts) | 296 |
+|[onlyphantom/llm-python](https://github.com/onlyphantom/llm-python) | 288 |
+|[morpheuslord/GPT_Vuln-analyzer](https://github.com/morpheuslord/GPT_Vuln-analyzer) | 285 |
+|[sullivan-sean/chat-langchainjs](https://github.com/sullivan-sean/chat-langchainjs) | 280 |
+|[wandb/edu](https://github.com/wandb/edu) | 277 |
+|[austin2035/chatpdf](https://github.com/austin2035/chatpdf) | 275 |
+|[liangwq/Chatglm_lora_multi-gpu](https://github.com/liangwq/Chatglm_lora_multi-gpu) | 273 |
+|[preset-io/promptimize](https://github.com/preset-io/promptimize) | 272 |
+|[Haste171/langchain-chatbot](https://github.com/Haste171/langchain-chatbot) | 271 |
+|[hnawaz007/pythondataanalysis](https://github.com/hnawaz007/pythondataanalysis) | 268 |
+|[JohnSnowLabs/langtest](https://github.com/JohnSnowLabs/langtest) | 268 |
+|[conceptofmind/toolformer](https://github.com/conceptofmind/toolformer) | 263 |
+|[sugarforever/LangChain-Tutorials](https://github.com/sugarforever/LangChain-Tutorials) | 260 |
+|[Safiullah-Rahu/CSV-AI](https://github.com/Safiullah-Rahu/CSV-AI) | 259 |
+|[artitw/text2text](https://github.com/artitw/text2text) | 257 |
+|[bborn/howdoi.ai](https://github.com/bborn/howdoi.ai) | 256 |
+|[JayZeeDesign/researcher-gpt](https://github.com/JayZeeDesign/researcher-gpt) | 252 |
+|[paolorechia/learn-langchain](https://github.com/paolorechia/learn-langchain) | 251 |
+|[ur-whitelab/exmol](https://github.com/ur-whitelab/exmol) | 251 |
+|[Azure-Samples/miyagi](https://github.com/Azure-Samples/miyagi) | 248 |
+|[recalign/RecAlign](https://github.com/recalign/RecAlign) | 243 |
+|[airobotlab/KoChatGPT](https://github.com/airobotlab/KoChatGPT) | 242 |
+|[explodinggradients/ragas](https://github.com/explodinggradients/ragas) | 232 |
+|[kaleido-lab/dolphin](https://github.com/kaleido-lab/dolphin) | 232 |
+|[hwchase17/chroma-langchain](https://github.com/hwchase17/chroma-langchain) | 230 |
+|[eosphoros-ai/DB-GPT-Hub](https://github.com/eosphoros-ai/DB-GPT-Hub) | 229 |
+|[shaman-ai/agent-actors](https://github.com/shaman-ai/agent-actors) | 227 |
+|[gia-guar/JARVIS-ChatGPT](https://github.com/gia-guar/JARVIS-ChatGPT) | 224 |
+|[shamspias/customizable-gpt-chatbot](https://github.com/shamspias/customizable-gpt-chatbot) | 223 |
+|[hwchase17/langchain-streamlit-template](https://github.com/hwchase17/langchain-streamlit-template) | 222 |
+|[alvarosevilla95/autolang](https://github.com/alvarosevilla95/autolang) | 221 |
+|[radi-cho/datasetGPT](https://github.com/radi-cho/datasetGPT) | 221 |
+|[gustavz/DataChad](https://github.com/gustavz/DataChad) | 219 |
+|[pablomarin/GPT-Azure-Search-Engine](https://github.com/pablomarin/GPT-Azure-Search-Engine) | 217 |
+|[su77ungr/CASALIOY](https://github.com/su77ungr/CASALIOY) | 217 |
+|[ennucore/clippinator](https://github.com/ennucore/clippinator) | 211 |
+|[edreisMD/plugnplai](https://github.com/edreisMD/plugnplai) | 210 |
+|[kaarthik108/snowChat](https://github.com/kaarthik108/snowChat) | 210 |
+|[PradipNichite/Youtube-Tutorials](https://github.com/PradipNichite/Youtube-Tutorials) | 206 |
+|[ur-whitelab/chemcrow-public](https://github.com/ur-whitelab/chemcrow-public) | 202 |
+|[CambioML/pykoi](https://github.com/CambioML/pykoi) | 199 |
+|[jbrukh/gpt-jargon](https://github.com/jbrukh/gpt-jargon) | 198 |
+|[LC1332/Chat-Haruhi-Suzumiya](https://github.com/LC1332/Chat-Haruhi-Suzumiya) | 196 |
+|[nicknochnack/LangchainDocuments](https://github.com/nicknochnack/LangchainDocuments) | 196 |
+|[yuanjie-ai/ChatLLM](https://github.com/yuanjie-ai/ChatLLM) | 196 |
+|[plchld/InsightFlow](https://github.com/plchld/InsightFlow) | 196 |
+|[yakami129/VirtualWife](https://github.com/yakami129/VirtualWife) | 194 |
+|[Mintplex-Labs/vector-admin](https://github.com/Mintplex-Labs/vector-admin) | 191 |
+|[SamPink/dev-gpt](https://github.com/SamPink/dev-gpt) | 190 |
+|[yasyf/compress-gpt](https://github.com/yasyf/compress-gpt) | 190 |
+|[benthecoder/ClassGPT](https://github.com/benthecoder/ClassGPT) | 190 |
+|[WongSaang/chatgpt-ui-server](https://github.com/WongSaang/chatgpt-ui-server) | 182 |
+|[voxel51/voxelgpt](https://github.com/voxel51/voxelgpt) | 181 |
+|[hardbyte/qabot](https://github.com/hardbyte/qabot) | 176 |
+|[orgexyz/BlockAGI](https://github.com/orgexyz/BlockAGI) | 174 |
+|[handrew/browserpilot](https://github.com/handrew/browserpilot) | 173 |
+|[miaoshouai/miaoshouai-assistant](https://github.com/miaoshouai/miaoshouai-assistant) | 172 |
+|[microsoft/azure-openai-in-a-day-workshop](https://github.com/microsoft/azure-openai-in-a-day-workshop) | 170 |
+|[kyegomez/swarms](https://github.com/kyegomez/swarms) | 169 |
+|[Azure-Samples/azure-search-power-skills](https://github.com/Azure-Samples/azure-search-power-skills) | 169 |
+|[chakkaradeep/pyCodeAGI](https://github.com/chakkaradeep/pyCodeAGI) | 169 |
+|[ethanyanjiali/minChatGPT](https://github.com/ethanyanjiali/minChatGPT) | 167 |
+|[ccurme/yolopandas](https://github.com/ccurme/yolopandas) | 166 |
+|[ju-bezdek/langchain-decorators](https://github.com/ju-bezdek/langchain-decorators) | 165 |
+|[Azure-Samples/azure-search-openai-demo-csharp](https://github.com/Azure-Samples/azure-search-openai-demo-csharp) | 164 |
+|[fengyuli-dev/multimedia-gpt](https://github.com/fengyuli-dev/multimedia-gpt) | 164 |
+|[grumpyp/aixplora](https://github.com/grumpyp/aixplora) | 162 |
+|[langchain-ai/web-explorer](https://github.com/langchain-ai/web-explorer) | 158 |
+|[JorisdeJong123/7-Days-of-LangChain](https://github.com/JorisdeJong123/7-Days-of-LangChain) | 158 |
+|[shauryr/S2QA](https://github.com/shauryr/S2QA) | 158 |
+|[Azure-Samples/jp-azureopenai-samples](https://github.com/Azure-Samples/jp-azureopenai-samples) | 157 |
+|[AkshitIreddy/Interactive-LLM-Powered-NPCs](https://github.com/AkshitIreddy/Interactive-LLM-Powered-NPCs) | 156 |
+|[ibiscp/LLM-IMDB](https://github.com/ibiscp/LLM-IMDB) | 156 |
+|[jmpaz/promptlib](https://github.com/jmpaz/promptlib) | 156 |
+|[mayooear/private-chatbot-mpt30b-langchain](https://github.com/mayooear/private-chatbot-mpt30b-langchain) | 155 |
+|[homanp/vercel-langchain](https://github.com/homanp/vercel-langchain) | 152 |
+|[mlops-for-all/mlops-for-all.github.io](https://github.com/mlops-for-all/mlops-for-all.github.io) | 151 |
+|[vaibkumr/prompt-optimizer](https://github.com/vaibkumr/prompt-optimizer) | 151 |
+|[Agenta-AI/agenta](https://github.com/Agenta-AI/agenta) | 150 |
+|[Klingefjord/chatgpt-telegram](https://github.com/Klingefjord/chatgpt-telegram) | 149 |
+|[menloparklab/falcon-langchain](https://github.com/menloparklab/falcon-langchain) | 148 |
+|[deeppavlov/dream](https://github.com/deeppavlov/dream) | 146 |
+|[positive666/Prompt-Can-Anything](https://github.com/positive666/Prompt-Can-Anything) | 145 |
+|[menloparklab/langchain-cohere-qdrant-doc-retrieval](https://github.com/menloparklab/langchain-cohere-qdrant-doc-retrieval) | 145 |
+|[realminchoi/babyagi-ui](https://github.com/realminchoi/babyagi-ui) | 145 |
+|[SpecterOps/Nemesis](https://github.com/SpecterOps/Nemesis) | 144 |
+|[Jaseci-Labs/jaseci](https://github.com/Jaseci-Labs/jaseci) | 144 |
+|[summarizepaper/summarizepaper](https://github.com/summarizepaper/summarizepaper) | 142 |
+|[peterw/StoryStorm](https://github.com/peterw/StoryStorm) | 141 |
+|[Aggregate-Intellect/practical-llms](https://github.com/Aggregate-Intellect/practical-llms) | 140 |
+|[streamlit/llm-examples](https://github.com/streamlit/llm-examples) | 140 |
+|[hirokidaichi/wanna](https://github.com/hirokidaichi/wanna) | 140 |
+|[Chainlit/cookbook](https://github.com/Chainlit/cookbook) | 139 |
+|[alphasecio/langchain-examples](https://github.com/alphasecio/langchain-examples) | 139 |
+|[flurb18/AgentOoba](https://github.com/flurb18/AgentOoba) | 139 |
+|[Teahouse-Studios/akari-bot](https://github.com/Teahouse-Studios/akari-bot) | 138 |
+|[yasyf/summ](https://github.com/yasyf/summ) | 138 |
+|[kulltc/chatgpt-sql](https://github.com/kulltc/chatgpt-sql) | 137 |
+|[v7labs/benchllm](https://github.com/v7labs/benchllm) | 135 |
+|[ray-project/langchain-ray](https://github.com/ray-project/langchain-ray) | 134 |
+|[petehunt/langchain-github-bot](https://github.com/petehunt/langchain-github-bot) | 134 |
+|[peterwnjenga/aigent](https://github.com/peterwnjenga/aigent) | 133 |
+|[jina-ai/fastapi-serve](https://github.com/jina-ai/fastapi-serve) | 133 |
+|[retr0reg/Ret2GPT](https://github.com/retr0reg/Ret2GPT) | 132 |
+|[agenthubdev/agenthub_operators](https://github.com/agenthubdev/agenthub_operators) | 131 |
+|[eunomia-bpf/GPTtrace](https://github.com/eunomia-bpf/GPTtrace) | 131 |
+|[solana-labs/chatgpt-plugin](https://github.com/solana-labs/chatgpt-plugin) | 130 |
+|[aurelio-labs/arxiv-bot](https://github.com/aurelio-labs/arxiv-bot) | 130 |
+|[ChuloAI/BrainChulo](https://github.com/ChuloAI/BrainChulo) | 128 |
+|[ssheng/BentoChain](https://github.com/ssheng/BentoChain) | 128 |
+|[mallahyari/drqa](https://github.com/mallahyari/drqa) | 127 |
+|[fixie-ai/fixie-examples](https://github.com/fixie-ai/fixie-examples) | 127 |
+|[davila7/file-gpt](https://github.com/davila7/file-gpt) | 127 |
+|[showlab/UniVTG](https://github.com/showlab/UniVTG) | 125 |
+|[zenml-io/zenml-projects](https://github.com/zenml-io/zenml-projects) | 125 |
+|[RedisVentures/redis-openai-qna](https://github.com/RedisVentures/redis-openai-qna) | 124 |
+|[PJLab-ADG/DriveLikeAHuman](https://github.com/PJLab-ADG/DriveLikeAHuman) | 122 |
+|[prof-frink-lab/slangchain](https://github.com/prof-frink-lab/slangchain) | 122 |
+|[Coding-Crashkurse/Langchain-Full-Course](https://github.com/Coding-Crashkurse/Langchain-Full-Course) | 121 |
+|[ciare-robotics/world-creator](https://github.com/ciare-robotics/world-creator) | 120 |
+|[blob42/Instrukt](https://github.com/blob42/Instrukt) | 120 |
+|[langchain-ai/langsmith-cookbook](https://github.com/langchain-ai/langsmith-cookbook) | 119 |
+|[OpenPluginACI/openplugin](https://github.com/OpenPluginACI/openplugin) | 118 |
+|[defenseunicorns/leapfrogai](https://github.com/defenseunicorns/leapfrogai) | 118 |
+|[sdaaron/QueryGPT](https://github.com/sdaaron/QueryGPT) | 117 |
+|[grumpyp/chroma-langchain-tutorial](https://github.com/grumpyp/chroma-langchain-tutorial) | 117 |
+|[3Alan/DocsMind](https://github.com/3Alan/DocsMind) | 116 |
+|[CodeAlchemyAI/ViLT-GPT](https://github.com/CodeAlchemyAI/ViLT-GPT) | 114 |
+|[emarco177/ice_breaker](https://github.com/emarco177/ice_breaker) | 113 |
+|[nftblackmagic/flask-langchain](https://github.com/nftblackmagic/flask-langchain) | 113 |
+|[log1stics/voice-generator-webui](https://github.com/log1stics/voice-generator-webui) | 112 |
+|[nrl-ai/pautobot](https://github.com/nrl-ai/pautobot) | 110 |
+|[Azure/business-process-automation](https://github.com/Azure/business-process-automation) | 110 |
+|[MedalCollector/Orator](https://github.com/MedalCollector/Orator) | 109 |
+|[wombyz/HormoziGPT](https://github.com/wombyz/HormoziGPT) | 108 |
+|[afaqueumer/DocQA](https://github.com/afaqueumer/DocQA) | 106 |
+|[mortium91/langchain-assistant](https://github.com/mortium91/langchain-assistant) | 106 |
+|[Azure/azure-sdk-tools](https://github.com/Azure/azure-sdk-tools) | 105 |
+|[yeagerai/genworlds](https://github.com/yeagerai/genworlds) | 105 |
+|[AmineDiro/cria](https://github.com/AmineDiro/cria) | 104 |
+|[langchain-ai/text-split-explorer](https://github.com/langchain-ai/text-split-explorer) | 104 |
+|[luisroque/large_laguage_models](https://github.com/luisroque/large_laguage_models) | 104 |
+|[xuwenhao/mactalk-ai-course](https://github.com/xuwenhao/mactalk-ai-course) | 104 |
+|[Open-Swarm-Net/GPT-Swarm](https://github.com/Open-Swarm-Net/GPT-Swarm) | 104 |
|
||||
|[langchain-ai/langchain-aws-template](https://github.com/langchain-ai/langchain-aws-template) | 104 |
|
||||
|[aws-samples/aws-genai-llm-chatbot](https://github.com/aws-samples/aws-genai-llm-chatbot) | 103 |
|
||||
|[crosleythomas/MirrorGPT](https://github.com/crosleythomas/MirrorGPT) | 103 |
|
||||
|[Dicklesworthstone/llama2_aided_tesseract](https://github.com/Dicklesworthstone/llama2_aided_tesseract) | 101 |
|
||||
|
||||
|
||||
|
||||
_Generated by [github-dependents-info](https://github.com/nvuillam/github-dependents-info)_
|
||||
|
||||
[github-dependents-info --repo hwchase17/langchain --markdownfile dependents.md --minstars 100 --sort stars]
|
||||
`github-dependents-info --repo langchain-ai/langchain --markdownfile dependents.md --minstars 100 --sort stars`
|
||||
|
||||
@@ -19,9 +19,9 @@
"source": [
"## Handling LLM API Errors\n",
"\n",
"This is maybe the most common use case for fallbacks. A request to an LLM API can fail for a variety of reasons - the API could be down, you could have hit rate limits, any number of things. Therefor, using fallbacks can help protect against these types of things.\n",
"This is maybe the most common use case for fallbacks. A request to an LLM API can fail for a variety of reasons - the API could be down, you could have hit rate limits, any number of things. Therefore, using fallbacks can help protect against these types of things.\n",
"\n",
"IMPORTANT: By default, a lot of the LLM wrappers catch errors and retry. You will most likely want to turn those off when working with fallbacks. Otherwise the first wrapper will keep on retying and not failing."
"IMPORTANT: By default, a lot of the LLM wrappers catch errors and retry. You will most likely want to turn those off when working with fallbacks. Otherwise the first wrapper will keep on retrying and not failing."
]
},
{
@@ -84,7 +84,7 @@
"# Let's use just the OpenAI LLm first, to show that we run into an error\n",
"with patch('openai.ChatCompletion.create', side_effect=RateLimitError()):\n",
"    try:\n",
"        print(openai_llm.invoke(\"Why did the the chicken cross the road?\"))\n",
"        print(openai_llm.invoke(\"Why did the chicken cross the road?\"))\n",
"    except:\n",
"        print(\"Hit error\")"
]
807
docs/extras/guides/local_llms.ipynb
Normal file
@@ -0,0 +1,807 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "b8982428",
"metadata": {},
"source": [
"# Private, local, open source LLMs\n",
"\n",
"## Use case\n",
"\n",
"The popularity of projects like [PrivateGPT](https://github.com/imartinez/privateGPT), [llama.cpp](https://github.com/ggerganov/llama.cpp), and [GPT4All](https://github.com/nomic-ai/gpt4all) underscores the demand to run LLMs locally (on your own device).\n",
"\n",
"This has at least two important benefits:\n",
"\n",
"1. `Privacy`: Your data is not sent to a third party, and it is not subject to the terms of service of a commercial service\n",
"2. `Cost`: There is no inference fee, which is important for token-intensive applications (e.g., [long-running simulations](https://twitter.com/RLanceMartin/status/1691097659262820352?s=20), summarization)\n",
"\n",
"## Overview\n",
"\n",
"Running an LLM locally requires a few things:\n",
"\n",
"1. `Open source LLM`: An open source LLM that can be freely modified and shared\n",
"2. `Inference`: Ability to run this LLM on your device w/ acceptable latency\n",
"\n",
"### Open Source LLMs\n",
"\n",
"Users can now gain access to a rapidly growing set of [open source LLMs](https://cameronrwolfe.substack.com/p/the-history-of-open-source-llms-better).\n",
"\n",
"These LLMs can be assessed across at least two dimensions (see figure):\n",
"\n",
"1. `Base model`: What is the base-model and how was it trained?\n",
"2. `Fine-tuning approach`: Was the base-model fine-tuned and, if so, what [set of instructions](https://cameronrwolfe.substack.com/p/beyond-llama-the-power-of-open-llms#%C2%A7alpaca-an-instruction-following-llama-model) was used?\n",
"\n",
"The relative performance of these models can be assessed using several leaderboards, including:\n",
"\n",
"1. [LmSys](https://chat.lmsys.org/?arena)\n",
"2. [GPT4All](https://gpt4all.io/index.html)\n",
"3. [HuggingFace](https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard)\n",
"\n",
"### Inference\n",
"\n",
"A few frameworks have emerged to support inference of open source LLMs on various devices:\n",
"\n",
"1. [`llama.cpp`](https://github.com/ggerganov/llama.cpp): C++ implementation of llama inference code with [weight optimization / quantization](https://finbarr.ca/how-is-llama-cpp-possible/)\n",
"2. [`gpt4all`](https://docs.gpt4all.io/index.html): Optimized C backend for inference\n",
"3. [`Ollama`](https://ollama.ai/): Bundles model weights and environment into an app that runs on device and serves the LLM\n",
"\n",
"In general, these frameworks will do a few things:\n",
"\n",
"1. `Quantization`: Reduce the memory footprint of the raw model weights\n",
"2. `Efficient implementation for inference`: Support inference on consumer hardware (e.g., CPU or laptop GPU)\n",
"\n",
"In particular, see [this excellent post](https://finbarr.ca/how-is-llama-cpp-possible/) on the importance of quantization.\n",
"\n",
"With less precision, we radically decrease the memory needed to store the LLM in memory.\n",
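"\n",
"As rough arithmetic (an illustrative estimate, not a benchmark): a 7B-parameter model needs ~7B × 2 bytes ≈ 14 GB at 16-bit precision, but only ~7B × 0.5 bytes ≈ 3.5 GB at 4-bit quantization, plus some runtime overhead.\n",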
"\n",
|
||||
"In addition, we can see the importance of GPU memory bandwidth [sheet](https://docs.google.com/spreadsheets/d/1OehfHHNSn66BP2h3Bxp2NJTVX97icU0GmCXF6pK23H8/edit#gid=0)!\n",
|
||||
"\n",
|
||||
"A Mac M2 Max is 5-6x faster than a M1 for inference due to the larger GPU memory bandwidth.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Quickstart\n",
|
||||
"\n",
|
||||
"[`Ollama`](https://ollama.ai/) is one way to easily run inference on macOS.\n",
|
||||
" \n",
|
||||
"The instructions [here](docs/integrations/llms/ollama) provide details, which we summarize:\n",
|
||||
" \n",
|
||||
"* [Download and run](https://ollama.ai/download) the app\n",
|
||||
"* From command line, fetch a model from this [list of options](https://github.com/jmorganca/ollama): e.g., `ollama pull llama2`\n",
|
||||
"* When the app is running, all models are automatically served on `localhost:11434`\n"
|
||||
]
|
||||
},
|
||||
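{
"cell_type": "markdown",
"id": "0aa1b2c3",
"metadata": {},
"source": [
"As an optional sanity check, you can hit the local server directly. This is a hedged sketch: the `/api/generate` endpoint and payload follow the Ollama README, and `stream: False` is assumed to return a single JSON response."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1e2f4d6a",
"metadata": {},
"outputs": [],
"source": [
"# Hedged sketch: query the local Ollama server directly over HTTP\n",
"import requests\n",
"\n",
"resp = requests.post(\n",
"    \"http://localhost:11434/api/generate\",\n",
"    json={\"model\": \"llama2\", \"prompt\": \"Hello\", \"stream\": False},\n",
")\n",
"print(resp.json())"
]
},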
{
"cell_type": "code",
"execution_count": 2,
"id": "86178adb",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"' The first man on the moon was Neil Armstrong, who landed on the moon on July 20, 1969 as part of the Apollo 11 mission. obviously.'"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain.llms import Ollama\n",
"llm = Ollama(model=\"llama2\")\n",
"llm(\"The first man on the moon was ...\")"
]
},
{
"cell_type": "markdown",
"id": "343ab645",
"metadata": {},
"source": [
"Stream tokens as they are being generated."
]
},
{
"cell_type": "code",
"execution_count": 40,
"id": "9cd83603",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" The first man to walk on the moon was Neil Armstrong, an American astronaut who was part of the Apollo 11 mission in 1969. февруари 20, 1969, Armstrong stepped out of the lunar module Eagle and onto the moon's surface, famously declaring \"That's one small step for man, one giant leap for mankind\" as he took his first steps. He was followed by fellow astronaut Edwin \"Buzz\" Aldrin, who also walked on the moon during the mission."
]
},
{
"data": {
"text/plain": [
"' The first man to walk on the moon was Neil Armstrong, an American astronaut who was part of the Apollo 11 mission in 1969. февруари 20, 1969, Armstrong stepped out of the lunar module Eagle and onto the moon\\'s surface, famously declaring \"That\\'s one small step for man, one giant leap for mankind\" as he took his first steps. He was followed by fellow astronaut Edwin \"Buzz\" Aldrin, who also walked on the moon during the mission.'"
]
},
"execution_count": 40,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain.callbacks.manager import CallbackManager\n",
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
"llm = Ollama(model=\"llama2\",\n",
"             callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]))\n",
"llm(\"The first man on the moon was ...\")"
]
},
{
"cell_type": "markdown",
"id": "5cb27414",
"metadata": {},
"source": [
"## Environment\n",
"\n",
"Inference speed is a challenge when running models locally (see above).\n",
"\n",
"To minimize latency, it is desirable to run models locally on a GPU, which many consumer laptops now include [e.g., Apple devices](https://www.apple.com/newsroom/2022/06/apple-unveils-m2-with-breakthrough-performance-and-capabilities/).\n",
"\n",
"And even with a GPU, the available GPU memory bandwidth (as noted above) is important.\n",
"\n",
"### Running Apple silicon GPU\n",
"\n",
"`Ollama` will automatically utilize the GPU on Apple devices.\n",
"\n",
"Other frameworks require the user to set up the environment to utilize the Apple GPU.\n",
"\n",
"For example, `llama.cpp` python bindings can be configured to use the GPU via [Metal](https://developer.apple.com/metal/).\n",
"\n",
"Metal is a graphics and compute API created by Apple providing near-direct access to the GPU.\n",
"\n",
"See the [`llama.cpp`](docs/integrations/llms/llamacpp) setup [here](https://github.com/abetlen/llama-cpp-python/blob/main/docs/install/macos.md) to enable this.\n",
"\n",
"In particular, ensure that conda is using the correct virtual environment that you created (`miniforge3`).\n",
"\n",
"E.g., for me:\n",
"\n",
"```\n",
"conda activate /Users/rlm/miniforge3/envs/llama\n",
"```\n",
"\n",
"With the above confirmed, then:\n",
"\n",
"```\n",
"CMAKE_ARGS=\"-DLLAMA_METAL=on\" FORCE_CMAKE=1 pip install -U llama-cpp-python --no-cache-dir\n",
"```"
]
},
{
"cell_type": "markdown",
"id": "c382e79a",
"metadata": {},
"source": [
"## LLMs\n",
"\n",
"There are various ways to gain access to quantized model weights.\n",
"\n",
"1. [`HuggingFace`](https://huggingface.co/TheBloke) - Many quantized models are available for download and can be run with frameworks such as [`llama.cpp`](https://github.com/ggerganov/llama.cpp)\n",
"2. [`gpt4all`](https://gpt4all.io/index.html) - The model explorer offers a leaderboard of metrics and associated quantized models available for download\n",
"3. [`Ollama`](https://github.com/jmorganca/ollama) - Several models can be accessed directly via `pull`\n",
"\n",
"### Ollama\n",
"\n",
"With [Ollama](docs/integrations/llms/ollama), fetch a model via `ollama pull <model family>:<tag>`:\n",
"\n",
"* E.g., for Llama-7b: `ollama pull llama2` will download the most basic version of the model (e.g., smallest # parameters and 4 bit quantization)\n",
"* We can also specify a particular version from the [model list](https://github.com/jmorganca/ollama), e.g., `ollama pull llama2:13b`\n",
"* See the full set of parameters on the [API reference page](https://api.python.langchain.com/en/latest/llms/langchain.llms.ollama.Ollama.html)"
]
},
{
"cell_type": "code",
"execution_count": 42,
"id": "8ecd2f78",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"' Sure! Here\\'s the answer, broken down step by step:\\n\\nThe first man on the moon was... Neil Armstrong.\\n\\nHere\\'s how I arrived at that answer:\\n\\n1. The first manned mission to land on the moon was Apollo 11.\\n2. The mission included three astronauts: Neil Armstrong, Edwin \"Buzz\" Aldrin, and Michael Collins.\\n3. Neil Armstrong was the mission commander and the first person to set foot on the moon.\\n4. On July 20, 1969, Armstrong stepped out of the lunar module Eagle and onto the moon\\'s surface, famously declaring \"That\\'s one small step for man, one giant leap for mankind.\"\\n\\nSo, the first man on the moon was Neil Armstrong!'"
]
},
"execution_count": 42,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain.llms import Ollama\n",
"llm = Ollama(model=\"llama2:13b\")\n",
"llm(\"The first man on the moon was ... think step by step\")"
]
},
{
"cell_type": "markdown",
"id": "07c8c0d1",
"metadata": {},
"source": [
"### Llama.cpp\n",
"\n",
"Llama.cpp is compatible with a [broad set of models](https://github.com/ggerganov/llama.cpp).\n",
"\n",
"For example, below we run inference on `llama2-13b` with 4 bit quantization downloaded from [HuggingFace](https://huggingface.co/TheBloke/Llama-2-13B-GGML/tree/main).\n",
"\n",
"As noted above, see the [API reference](https://api.python.langchain.com/en/latest/llms/langchain.llms.llamacpp.LlamaCpp.html?highlight=llamacpp#langchain.llms.llamacpp.LlamaCpp) for the full set of parameters.\n",
"\n",
"From the [llama.cpp docs](https://python.langchain.com/docs/integrations/llms/llamacpp), a few are worth commenting on:\n",
"\n",
"`n_gpu_layers`: number of layers to be loaded into GPU memory\n",
"\n",
"* Value: 1\n",
"* Meaning: Only one layer of the model will be loaded into GPU memory (1 is often sufficient).\n",
"\n",
"`n_batch`: number of tokens the model should process in parallel\n",
"* Value: 512 (used below)\n",
"* Meaning: It's recommended to choose a value between 1 and n_ctx (which in this case is set to 2048)\n",
"\n",
"`n_ctx`: token context window\n",
"* Value: 2048\n",
"* Meaning: The model will consider a window of 2048 tokens at a time\n",
"\n",
"`f16_kv`: whether the model should use half-precision for the key/value cache\n",
"* Value: True\n",
"* Meaning: The model will use half-precision, which can be more memory efficient; Metal only supports True."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5eba38dc",
"metadata": {},
"outputs": [],
"source": [
"pip install llama-cpp-python"
]
},
{
"cell_type": "code",
"execution_count": 43,
"id": "9d5f94b5",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"objc[10142]: Class GGMLMetalClass is implemented in both /Users/rlm/miniforge3/envs/llama/lib/python3.9/site-packages/gpt4all/llmodel_DO_NOT_MODIFY/build/libreplit-mainline-metal.dylib (0x2a0c4c208) and /Users/rlm/miniforge3/envs/llama/lib/python3.9/site-packages/llama_cpp/libllama.dylib (0x2c28bc208). One of the two will be used. Which one is undefined.\n",
"llama.cpp: loading model from /Users/rlm/Desktop/Code/llama.cpp/llama-2-13b-chat.ggmlv3.q4_0.bin\n",
"llama_model_load_internal: format = ggjt v3 (latest)\n",
"llama_model_load_internal: n_vocab = 32000\n",
"llama_model_load_internal: n_ctx = 2048\n",
"llama_model_load_internal: n_embd = 5120\n",
"llama_model_load_internal: n_mult = 256\n",
"llama_model_load_internal: n_head = 40\n",
"llama_model_load_internal: n_layer = 40\n",
"llama_model_load_internal: n_rot = 128\n",
"llama_model_load_internal: freq_base = 10000.0\n",
"llama_model_load_internal: freq_scale = 1\n",
"llama_model_load_internal: ftype = 2 (mostly Q4_0)\n",
"llama_model_load_internal: n_ff = 13824\n",
"llama_model_load_internal: model size = 13B\n",
"llama_model_load_internal: ggml ctx size = 0.09 MB\n",
"llama_model_load_internal: mem required = 8953.71 MB (+ 1608.00 MB per state)\n",
"llama_new_context_with_model: kv self size = 1600.00 MB\n",
"ggml_metal_init: allocating\n",
"ggml_metal_init: using MPS\n",
"ggml_metal_init: loading '/Users/rlm/miniforge3/envs/llama/lib/python3.9/site-packages/llama_cpp/ggml-metal.metal'\n",
"ggml_metal_init: loaded kernel_add 0x47774af60\n",
"ggml_metal_init: loaded kernel_mul 0x47774bc00\n",
"ggml_metal_init: loaded kernel_mul_row 0x47774c230\n",
"ggml_metal_init: loaded kernel_scale 0x47774c890\n",
"ggml_metal_init: loaded kernel_silu 0x47774cef0\n",
"ggml_metal_init: loaded kernel_relu 0x10e33e500\n",
"ggml_metal_init: loaded kernel_gelu 0x47774b2f0\n",
"ggml_metal_init: loaded kernel_soft_max 0x47771a580\n",
"ggml_metal_init: loaded kernel_diag_mask_inf 0x47774dab0\n",
"ggml_metal_init: loaded kernel_get_rows_f16 0x47774e110\n",
"ggml_metal_init: loaded kernel_get_rows_q4_0 0x47774e7d0\n",
"ggml_metal_init: loaded kernel_get_rows_q4_1 0x13efd7170\n",
"ggml_metal_init: loaded kernel_get_rows_q2_K 0x13efd73d0\n",
"ggml_metal_init: loaded kernel_get_rows_q3_K 0x13efd7630\n",
"ggml_metal_init: loaded kernel_get_rows_q4_K 0x13efd7890\n",
"ggml_metal_init: loaded kernel_get_rows_q5_K 0x4744c9740\n",
"ggml_metal_init: loaded kernel_get_rows_q6_K 0x4744ca6b0\n",
"ggml_metal_init: loaded kernel_rms_norm 0x4744cb250\n",
"ggml_metal_init: loaded kernel_norm 0x4744cb970\n",
"ggml_metal_init: loaded kernel_mul_mat_f16_f32 0x10e33f700\n",
"ggml_metal_init: loaded kernel_mul_mat_q4_0_f32 0x10e33fcd0\n",
"ggml_metal_init: loaded kernel_mul_mat_q4_1_f32 0x4744cc2d0\n",
"ggml_metal_init: loaded kernel_mul_mat_q2_K_f32 0x4744cc6f0\n",
"ggml_metal_init: loaded kernel_mul_mat_q3_K_f32 0x4744cd6b0\n",
"ggml_metal_init: loaded kernel_mul_mat_q4_K_f32 0x4744cde20\n",
"ggml_metal_init: loaded kernel_mul_mat_q5_K_f32 0x10e33ff30\n",
"ggml_metal_init: loaded kernel_mul_mat_q6_K_f32 0x10e340190\n",
"ggml_metal_init: loaded kernel_rope 0x10e3403f0\n",
"ggml_metal_init: loaded kernel_alibi_f32 0x10e340de0\n",
"ggml_metal_init: loaded kernel_cpy_f32_f16 0x10e3416d0\n",
"ggml_metal_init: loaded kernel_cpy_f32_f32 0x10e342080\n",
"ggml_metal_init: loaded kernel_cpy_f16_f16 0x10e342ca0\n",
"ggml_metal_init: recommendedMaxWorkingSetSize = 21845.34 MB\n",
"ggml_metal_init: hasUnifiedMemory = true\n",
"ggml_metal_init: maxTransferRate = built-in GPU\n",
"ggml_metal_add_buffer: allocated 'data ' buffer, size = 6984.06 MB, ( 6986.19 / 21845.34)\n",
"ggml_metal_add_buffer: allocated 'eval ' buffer, size = 1032.00 MB, ( 8018.19 / 21845.34)\n",
"ggml_metal_add_buffer: allocated 'kv ' buffer, size = 1602.00 MB, ( 9620.19 / 21845.34)\n",
"ggml_metal_add_buffer: allocated 'scr0 ' buffer, size = 426.00 MB, (10046.19 / 21845.34)\n",
"ggml_metal_add_buffer: allocated 'scr1 ' buffer, size = 512.00 MB, (10558.19 / 21845.34)\n",
"AVX = 0 | AVX2 = 0 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 0 | NEON = 1 | ARM_FMA = 1 | F16C = 0 | FP16_VA = 1 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 0 | VSX = 0 | \n"
]
}
],
"source": [
"from langchain.llms import LlamaCpp\n",
"llm = LlamaCpp(\n",
"    model_path=\"/Users/rlm/Desktop/Code/llama.cpp/llama-2-13b-chat.ggmlv3.q4_0.bin\",\n",
"    n_gpu_layers=1,\n",
"    n_batch=512,\n",
"    n_ctx=2048,\n",
"    f16_kv=True,\n",
"    callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),\n",
"    verbose=True,\n",
")"
]
},
{
"cell_type": "markdown",
"id": "f56f5168",
"metadata": {},
"source": [
"The console log will show the below to indicate Metal was enabled properly from the steps above:\n",
"```\n",
"ggml_metal_init: allocating\n",
"ggml_metal_init: using MPS\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 45,
"id": "7890a077",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Llama.generate: prefix-match hit\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
" and use logical reasoning to figure out who the first man on the moon was.\n",
"\n",
"Here are some clues:\n",
"\n",
"1. The first man on the moon was an American.\n",
"2. He was part of the Apollo 11 mission.\n",
"3. He stepped out of the lunar module and became the first person to set foot on the moon's surface.\n",
"4. His last name is Armstrong.\n",
"\n",
"Now, let's use our reasoning skills to figure out who the first man on the moon was. Based on clue #1, we know that the first man on the moon was an American. Clue #2 tells us that he was part of the Apollo 11 mission. Clue #3 reveals that he was the first person to set foot on the moon's surface. And finally, clue #4 gives us his last name: Armstrong.\n",
"Therefore, the first man on the moon was Neil Armstrong!"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"llama_print_timings: load time = 9623.21 ms\n",
"llama_print_timings: sample time = 143.77 ms / 203 runs ( 0.71 ms per token, 1412.01 tokens per second)\n",
"llama_print_timings: prompt eval time = 485.94 ms / 7 tokens ( 69.42 ms per token, 14.40 tokens per second)\n",
"llama_print_timings: eval time = 6385.16 ms / 202 runs ( 31.61 ms per token, 31.64 tokens per second)\n",
"llama_print_timings: total time = 7279.28 ms\n"
]
},
{
"data": {
"text/plain": [
"\" and use logical reasoning to figure out who the first man on the moon was.\\n\\nHere are some clues:\\n\\n1. The first man on the moon was an American.\\n2. He was part of the Apollo 11 mission.\\n3. He stepped out of the lunar module and became the first person to set foot on the moon's surface.\\n4. His last name is Armstrong.\\n\\nNow, let's use our reasoning skills to figure out who the first man on the moon was. Based on clue #1, we know that the first man on the moon was an American. Clue #2 tells us that he was part of the Apollo 11 mission. Clue #3 reveals that he was the first person to set foot on the moon's surface. And finally, clue #4 gives us his last name: Armstrong.\\nTherefore, the first man on the moon was Neil Armstrong!\""
]
},
"execution_count": 45,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm(\"The first man on the moon was ... Let's think step by step\")"
]
},
{
"cell_type": "markdown",
"id": "831ddf7c",
"metadata": {},
"source": [
"### GPT4All\n",
"\n",
"We can use model weights downloaded from the [GPT4All](https://python.langchain.com/docs/integrations/llms/gpt4all) model explorer.\n",
"\n",
"Similar to what is shown above, we can run inference and use [the API reference](https://api.python.langchain.com/en/latest/llms/langchain.llms.gpt4all.GPT4All.html?highlight=gpt4all#langchain.llms.gpt4all.GPT4All) to set parameters of interest."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e27baf6e",
"metadata": {},
"outputs": [],
"source": [
"pip install gpt4all"
]
},
{
"cell_type": "code",
"execution_count": 46,
"id": "b55a2147",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Found model file at /Users/rlm/Desktop/Code/gpt4all/models/nous-hermes-13b.ggmlv3.q4_0.bin\n",
"llama_new_context_with_model: max tensor size = 87.89 MB\n",
"llama_new_context_with_model: max tensor size = 87.89 MB\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"llama.cpp: using Metal\n",
"llama.cpp: loading model from /Users/rlm/Desktop/Code/gpt4all/models/nous-hermes-13b.ggmlv3.q4_0.bin\n",
"llama_model_load_internal: format = ggjt v3 (latest)\n",
"llama_model_load_internal: n_vocab = 32001\n",
"llama_model_load_internal: n_ctx = 2048\n",
"llama_model_load_internal: n_embd = 5120\n",
"llama_model_load_internal: n_mult = 256\n",
"llama_model_load_internal: n_head = 40\n",
"llama_model_load_internal: n_layer = 40\n",
"llama_model_load_internal: n_rot = 128\n",
"llama_model_load_internal: ftype = 2 (mostly Q4_0)\n",
"llama_model_load_internal: n_ff = 13824\n",
"llama_model_load_internal: n_parts = 1\n",
"llama_model_load_internal: model size = 13B\n",
"llama_model_load_internal: ggml ctx size = 0.09 MB\n",
"llama_model_load_internal: mem required = 9031.71 MB (+ 1608.00 MB per state)\n",
"llama_new_context_with_model: kv self size = 1600.00 MB\n",
"ggml_metal_init: allocating\n",
"ggml_metal_init: using MPS\n",
"ggml_metal_init: loading '/Users/rlm/miniforge3/envs/llama/lib/python3.9/site-packages/gpt4all/llmodel_DO_NOT_MODIFY/build/ggml-metal.metal'\n",
"ggml_metal_init: loaded kernel_add 0x37944d850\n",
"ggml_metal_init: loaded kernel_mul 0x37944f350\n",
"ggml_metal_init: loaded kernel_mul_row 0x37944fdd0\n",
"ggml_metal_init: loaded kernel_scale 0x3794505a0\n",
"ggml_metal_init: loaded kernel_silu 0x379450800\n",
"ggml_metal_init: loaded kernel_relu 0x379450a60\n",
"ggml_metal_init: loaded kernel_gelu 0x379450cc0\n",
"ggml_metal_init: loaded kernel_soft_max 0x379450ff0\n",
"ggml_metal_init: loaded kernel_diag_mask_inf 0x379451250\n",
"ggml_metal_init: loaded kernel_get_rows_f16 0x3794514b0\n",
"ggml_metal_init: loaded kernel_get_rows_q4_0 0x379451710\n",
"ggml_metal_init: loaded kernel_get_rows_q4_1 0x379451970\n",
"ggml_metal_init: loaded kernel_get_rows_q2_k 0x379451bd0\n",
"ggml_metal_init: loaded kernel_get_rows_q3_k 0x379451e30\n",
"ggml_metal_init: loaded kernel_get_rows_q4_k 0x379452090\n",
"ggml_metal_init: loaded kernel_get_rows_q5_k 0x3794522f0\n",
"ggml_metal_init: loaded kernel_get_rows_q6_k 0x379452550\n",
"ggml_metal_init: loaded kernel_rms_norm 0x3794527b0\n",
"ggml_metal_init: loaded kernel_norm 0x379452a10\n",
"ggml_metal_init: loaded kernel_mul_mat_f16_f32 0x379452c70\n",
"ggml_metal_init: loaded kernel_mul_mat_q4_0_f32 0x379452ed0\n",
"ggml_metal_init: loaded kernel_mul_mat_q4_1_f32 0x379453130\n",
"ggml_metal_init: loaded kernel_mul_mat_q2_k_f32 0x379453390\n",
"ggml_metal_init: loaded kernel_mul_mat_q3_k_f32 0x3794535f0\n",
"ggml_metal_init: loaded kernel_mul_mat_q4_k_f32 0x379453850\n",
"ggml_metal_init: loaded kernel_mul_mat_q5_k_f32 0x379453ab0\n",
"ggml_metal_init: loaded kernel_mul_mat_q6_k_f32 0x379453d10\n",
"ggml_metal_init: loaded kernel_rope 0x379453f70\n",
"ggml_metal_init: loaded kernel_alibi_f32 0x3794541d0\n",
"ggml_metal_init: loaded kernel_cpy_f32_f16 0x379454430\n",
"ggml_metal_init: loaded kernel_cpy_f32_f32 0x379454690\n",
"ggml_metal_init: loaded kernel_cpy_f16_f16 0x3794548f0\n",
"ggml_metal_init: recommendedMaxWorkingSetSize = 21845.34 MB\n",
"ggml_metal_init: hasUnifiedMemory = true\n",
"ggml_metal_init: maxTransferRate = built-in GPU\n",
"ggml_metal_add_buffer: allocated 'data ' buffer, size = 6984.06 MB, (17542.94 / 21845.34)\n",
"ggml_metal_add_buffer: allocated 'eval ' buffer, size = 1024.00 MB, (18566.94 / 21845.34)\n",
"ggml_metal_add_buffer: allocated 'kv ' buffer, size = 1602.00 MB, (20168.94 / 21845.34)\n",
"ggml_metal_add_buffer: allocated 'scr0 ' buffer, size = 512.00 MB, (20680.94 / 21845.34)\n",
"ggml_metal_add_buffer: allocated 'scr1 ' buffer, size = 512.00 MB, (21192.94 / 21845.34)\n",
"ggml_metal_free: deallocating\n"
]
}
],
"source": [
"from langchain.llms import GPT4All\n",
"llm = GPT4All(model=\"/Users/rlm/Desktop/Code/gpt4all/models/nous-hermes-13b.ggmlv3.q4_0.bin\")"
]
},
{
"cell_type": "code",
"execution_count": 47,
"id": "e3d4526f",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\".\\n1) The United States decides to send a manned mission to the moon.2) They choose their best astronauts and train them for this specific mission.3) They build a spacecraft that can take humans to the moon, called the Lunar Module (LM).4) They also create a larger spacecraft, called the Saturn V rocket, which will launch both the LM and the Command Service Module (CSM), which will carry the astronauts into orbit.5) The mission is planned down to the smallest detail: from the trajectory of the rockets to the exact movements of the astronauts during their moon landing.6) On July 16, 1969, the Saturn V rocket launches from Kennedy Space Center in Florida, carrying the Apollo 11 mission crew into space.7) After one and a half orbits around the Earth, the LM separates from the CSM and begins its descent to the moon's surface.8) On July 20, 1969, at 2:56 pm EDT (GMT-4), Neil Armstrong becomes the first man on the moon. He speaks these\""
]
},
"execution_count": 47,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm(\"The first man on the moon was ... Let's think step by step\")"
]
},
{
"cell_type": "markdown",
"id": "6b84e543",
"metadata": {},
"source": [
"## Prompts\n",
"\n",
"Some LLMs will benefit from specific prompts.\n",
"\n",
"For example, llama2 can use [special tokens](https://twitter.com/RLanceMartin/status/1681879318493003776?s=20).\n",
"\n",
"We can use `ConditionalPromptSelector` to set the prompt based on the model type."
]
},
{
"cell_type": "code",
"execution_count": 57,
"id": "d082b10a",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"llama.cpp: loading model from /Users/rlm/Desktop/Code/llama.cpp/llama-2-13b-chat.ggmlv3.q4_0.bin\n",
"llama_model_load_internal: format = ggjt v3 (latest)\n",
"llama_model_load_internal: n_vocab = 32000\n",
"llama_model_load_internal: n_ctx = 2048\n",
"llama_model_load_internal: n_embd = 5120\n",
"llama_model_load_internal: n_mult = 256\n",
"llama_model_load_internal: n_head = 40\n",
"llama_model_load_internal: n_layer = 40\n",
"llama_model_load_internal: n_rot = 128\n",
"llama_model_load_internal: freq_base = 10000.0\n",
"llama_model_load_internal: freq_scale = 1\n",
"llama_model_load_internal: ftype = 2 (mostly Q4_0)\n",
"llama_model_load_internal: n_ff = 13824\n",
"llama_model_load_internal: model size = 13B\n",
"llama_model_load_internal: ggml ctx size = 0.09 MB\n",
"llama_model_load_internal: mem required = 8953.71 MB (+ 1608.00 MB per state)\n",
"llama_new_context_with_model: kv self size = 1600.00 MB\n",
"ggml_metal_init: allocating\n",
"ggml_metal_init: using MPS\n",
"ggml_metal_init: loading '/Users/rlm/miniforge3/envs/llama/lib/python3.9/site-packages/llama_cpp/ggml-metal.metal'\n",
"ggml_metal_init: loaded kernel_add 0x4744d09d0\n",
"ggml_metal_init: loaded kernel_mul 0x3781cb3d0\n",
"ggml_metal_init: loaded kernel_mul_row 0x37813bb60\n",
"ggml_metal_init: loaded kernel_scale 0x474481080\n",
"ggml_metal_init: loaded kernel_silu 0x4744d29f0\n",
"ggml_metal_init: loaded kernel_relu 0x3781254c0\n",
"ggml_metal_init: loaded kernel_gelu 0x47447f280\n",
"ggml_metal_init: loaded kernel_soft_max 0x4744cf470\n",
"ggml_metal_init: loaded kernel_diag_mask_inf 0x4744cf6d0\n",
"ggml_metal_init: loaded kernel_get_rows_f16 0x4744cf930\n",
"ggml_metal_init: loaded kernel_get_rows_q4_0 0x4744cfb90\n",
"ggml_metal_init: loaded kernel_get_rows_q4_1 0x4744cfdf0\n",
"ggml_metal_init: loaded kernel_get_rows_q2_K 0x4744d0050\n",
"ggml_metal_init: loaded kernel_get_rows_q3_K 0x4744ce980\n",
"ggml_metal_init: loaded kernel_get_rows_q4_K 0x4744cebe0\n",
"ggml_metal_init: loaded kernel_get_rows_q5_K 0x4744cee40\n",
"ggml_metal_init: loaded kernel_get_rows_q6_K 0x4744cf0a0\n",
"ggml_metal_init: loaded kernel_rms_norm 0x474482450\n",
"ggml_metal_init: loaded kernel_norm 0x4744826b0\n",
"ggml_metal_init: loaded kernel_mul_mat_f16_f32 0x474482910\n",
"ggml_metal_init: loaded kernel_mul_mat_q4_0_f32 0x474482b70\n",
"ggml_metal_init: loaded kernel_mul_mat_q4_1_f32 0x474482dd0\n",
"ggml_metal_init: loaded kernel_mul_mat_q2_K_f32 0x474483030\n",
"ggml_metal_init: loaded kernel_mul_mat_q3_K_f32 0x474483290\n",
"ggml_metal_init: loaded kernel_mul_mat_q4_K_f32 0x4744834f0\n",
"ggml_metal_init: loaded kernel_mul_mat_q5_K_f32 0x474483750\n",
"ggml_metal_init: loaded kernel_mul_mat_q6_K_f32 0x4744839b0\n",
"ggml_metal_init: loaded kernel_rope 0x474483c10\n",
"ggml_metal_init: loaded kernel_alibi_f32 0x474483e70\n",
"ggml_metal_init: loaded kernel_cpy_f32_f16 0x4744840d0\n",
"ggml_metal_init: loaded kernel_cpy_f32_f32 0x474484330\n",
"ggml_metal_init: loaded kernel_cpy_f16_f16 0x474484590\n",
"ggml_metal_init: recommendedMaxWorkingSetSize = 21845.34 MB\n",
"ggml_metal_init: hasUnifiedMemory = true\n",
"ggml_metal_init: maxTransferRate = built-in GPU\n",
"ggml_metal_add_buffer: allocated 'data ' buffer, size = 6984.06 MB, ( 6986.94 / 21845.34)\n",
"ggml_metal_add_buffer: allocated 'eval ' buffer, size = 1032.00 MB, ( 8018.94 / 21845.34)\n",
"ggml_metal_add_buffer: allocated 'kv ' buffer, size = 1602.00 MB, ( 9620.94 / 21845.34)\n",
"ggml_metal_add_buffer: allocated 'scr0 ' buffer, size = 426.00 MB, (10046.94 / 21845.34)\n",
"ggml_metal_add_buffer: allocated 'scr1 ' buffer, size = 512.00 MB, (10558.94 / 21845.34)\n",
"AVX = 0 | AVX2 = 0 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 0 | NEON = 1 | ARM_FMA = 1 | F16C = 0 | FP16_VA = 1 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 0 | VSX = 0 | \n"
]
}
],
"source": [
"# Set our LLM\n",
"llm = LlamaCpp(\n",
"    model_path=\"/Users/rlm/Desktop/Code/llama.cpp/llama-2-13b-chat.ggmlv3.q4_0.bin\",\n",
"    n_gpu_layers=1,\n",
"    n_batch=512,\n",
"    n_ctx=2048,\n",
"    f16_kv=True,\n",
"    callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),\n",
"    verbose=True,\n",
")"
]
},
{
"cell_type": "markdown",
"id": "66656084",
"metadata": {},
"source": [
"Set the associated prompt."
]
},
{
"cell_type": "code",
"execution_count": 58,
"id": "8555f5bf",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"PromptTemplate(input_variables=['question'], output_parser=None, partial_variables={}, template='<<SYS>> \\n You are an assistant tasked with improving Google search results. \\n <</SYS>> \\n\\n [INST] Generate THREE Google search queries that are similar to this question. The output should be a numbered list of questions and each should have a question mark at the end: \\n\\n {question} [/INST]', template_format='f-string', validate_template=True)"
]
},
"execution_count": 58,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain import PromptTemplate, LLMChain\n",
"from langchain.chains.prompt_selector import ConditionalPromptSelector\n",
"\n",
"DEFAULT_LLAMA_SEARCH_PROMPT = PromptTemplate(\n",
"    input_variables=[\"question\"],\n",
"    template=\"\"\"<<SYS>> \\n You are an assistant tasked with improving Google search \\\n",
"results. \\n <</SYS>> \\n\\n [INST] Generate THREE Google search queries that \\\n",
"are similar to this question. The output should be a numbered list of questions \\\n",
"and each should have a question mark at the end: \\n\\n {question} [/INST]\"\"\",\n",
")\n",
"\n",
"DEFAULT_SEARCH_PROMPT = PromptTemplate(\n",
"    input_variables=[\"question\"],\n",
"    template=\"\"\"You are an assistant tasked with improving Google search \\\n",
"results. Generate THREE Google search queries that are similar to \\\n",
"this question. The output should be a numbered list of questions and each \\\n",
"should have a question mark at the end: {question}\"\"\",\n",
")\n",
"\n",
"QUESTION_PROMPT_SELECTOR = ConditionalPromptSelector(\n",
"    default_prompt=DEFAULT_SEARCH_PROMPT,\n",
"    conditionals=[\n",
"        (lambda llm: isinstance(llm, LlamaCpp), DEFAULT_LLAMA_SEARCH_PROMPT)\n",
"    ],\n",
")\n",
"\n",
"prompt = QUESTION_PROMPT_SELECTOR.get_prompt(llm)\n",
"prompt"
]
},
{
"cell_type": "code",
"execution_count": 59,
"id": "d0aedfd2",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" Sure! Here are three similar search queries with a question mark at the end:\n",
"\n",
"1. Which NBA team did LeBron James lead to a championship in the year he was drafted?\n",
"2. Who won the Grammy Awards for Best New Artist and Best Female Pop Vocal Performance in the same year that Lady Gaga was born?\n",
"3. What MLB team did Babe Ruth play for when he hit 60 home runs in a single season?"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"llama_print_timings: load time = 14943.19 ms\n",
"llama_print_timings: sample time = 72.93 ms / 101 runs ( 0.72 ms per token, 1384.87 tokens per second)\n",
"llama_print_timings: prompt eval time = 14942.95 ms / 93 tokens ( 160.68 ms per token, 6.22 tokens per second)\n",
"llama_print_timings: eval time = 3430.85 ms / 100 runs ( 34.31 ms per token, 29.15 tokens per second)\n",
"llama_print_timings: total time = 18578.26 ms\n"
]
},
{
"data": {
"text/plain": [
"' Sure! Here are three similar search queries with a question mark at the end:\\n\\n1. Which NBA team did LeBron James lead to a championship in the year he was drafted?\\n2. Who won the Grammy Awards for Best New Artist and Best Female Pop Vocal Performance in the same year that Lady Gaga was born?\\n3. What MLB team did Babe Ruth play for when he hit 60 home runs in a single season?'"
]
},
"execution_count": 59,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Chain\n",
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
"question = \"What NFL team won the Super Bowl in the year that Justin Bieber was born?\"\n",
"llm_chain.run({\"question\": question})"
]
},
{
"cell_type": "markdown",
"id": "6ba66260",
"metadata": {},
"source": [
"## Use cases\n",
"\n",
"Given an `llm` created from one of the models above, you can use it for [many use cases](docs/use_cases).\n",
"\n",
"For example, here is a guide to [RAG](docs/use_cases/question_answering/how_to/local_retrieval_qa) with local LLMs, and a minimal sketch follows below.\n",
"\n",
"In general, use cases for local models can be driven by at least two factors:\n",
"\n",
"* `Privacy`: private data (e.g., journals) that a user does not want to share\n",
"* `Cost`: text preprocessing (extraction/tagging), summarization, and agent simulations are token-use-intensive tasks\n",
"\n",
"There are a few approaches to supporting specific use cases:\n",
"\n",
"* Fine-tuning (e.g., [gpt-llm-trainer](https://github.com/mshumer/gpt-llm-trainer), [Anyscale](https://www.anyscale.com/blog/fine-tuning-llama-2-a-comprehensive-case-study-for-tailoring-models-to-unique-applications))\n",
"* [Function-calling](https://github.com/MeetKai/functionary/tree/main) for use-cases like extraction or tagging\n",
"\n"
]
},
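{
"cell_type": "markdown",
"id": "9f8d6c2a",
"metadata": {},
"source": [
"For illustration, here is a minimal RAG sketch (not the full guide linked above): it assumes the `llm` built earlier, network access for `WebBaseLoader` (which needs `beautifulsoup4`), and the `chromadb` and `gpt4all` packages for the vector store and embeddings."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3d5a7c1b",
"metadata": {},
"outputs": [],
"source": [
"# Minimal RAG sketch with a local LLM (assumptions noted in the cell above)\n",
"from langchain.document_loaders import WebBaseLoader\n",
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"from langchain.embeddings import GPT4AllEmbeddings\n",
"from langchain.vectorstores import Chroma\n",
"from langchain.chains import RetrievalQA\n",
"\n",
"# Load a web page and split it into overlapping chunks\n",
"docs = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\").load()\n",
"splits = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50).split_documents(docs)\n",
"\n",
"# Index the chunks locally and answer a question with the local llm\n",
"vectorstore = Chroma.from_documents(documents=splits, embedding=GPT4AllEmbeddings())\n",
"qa_chain = RetrievalQA.from_chain_type(llm, retriever=vectorstore.as_retriever())\n",
"qa_chain({\"query\": \"What is task decomposition?\"})"
]
}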
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.16"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
105
docs/extras/guides/pydantic_compatibility.md
Normal file
@@ -0,0 +1,105 @@
# Pydantic Compatibility

- Pydantic v2 was released in June, 2023 (https://docs.pydantic.dev/2.0/blog/pydantic-v2-final/)
- v2 contains a number of breaking changes (https://docs.pydantic.dev/2.0/migration/)
- Pydantic v2 and v1 are under the same package name, so both versions cannot be installed at the same time

## LangChain Pydantic Migration Plan

As of `langchain>=0.0.267`, LangChain will allow users to install either Pydantic V1 or V2.
* Internally LangChain will continue to [use V1](https://docs.pydantic.dev/latest/migration/#continue-using-pydantic-v1-features).
* During this time, users can pin their pydantic version to v1 to avoid breaking changes, or start a partial
migration using pydantic v2 throughout their code, but avoid mixing v1 and v2 code for LangChain (see below).

Users can either pin to pydantic v1 (e.g., the pin shown below) and upgrade their code in one go once LangChain has migrated to v2 internally, or they can start a partial migration to v2, but they must avoid mixing v1 and v2 code for LangChain.
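
A hedged example of pinning (any v1 pin works; 1.10 is simply the current v1 release line):

```
pip install "pydantic>=1.10,<2"
```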
Below are two examples showing how to avoid mixing pydantic v1 and v2 code in
the case of inheritance and in the case of passing objects to LangChain.

**Example 1: Extending via inheritance**

**YES**

```python
from langchain.tools.base import BaseTool
from pydantic.v1 import Field, root_validator, validator


class CustomTool(BaseTool): # BaseTool is v1 code
    x: int = Field(default=1)

    def _run(*args, **kwargs):
        return "hello"

    @validator('x') # v1 code
    @classmethod
    def validate_x(cls, x: int) -> int:
        return 1


CustomTool(
    name='custom_tool',
    description="hello",
    x=1,
)
```

Mixing Pydantic v2 primitives with Pydantic v1 primitives can raise cryptic errors.

**NO**

```python
from langchain.tools.base import BaseTool
from pydantic import Field, field_validator # pydantic v2


class CustomTool(BaseTool): # BaseTool is v1 code
    x: int = Field(default=1)

    def _run(*args, **kwargs):
        return "hello"

    @field_validator('x') # v2 code
    @classmethod
    def validate_x(cls, x: int) -> int:
        return 1


CustomTool(
    name='custom_tool',
    description="hello",
    x=1,
)
```

**Example 2: Passing objects to LangChain**

**YES**

```python
from langchain.tools.base import Tool
from pydantic.v1 import BaseModel, Field # <-- Uses v1 namespace

class CalculatorInput(BaseModel):
    question: str = Field()

Tool.from_function( # <-- tool uses v1 namespace
    func=lambda question: 'hello',
    name="Calculator",
    description="useful for when you need to answer questions about math",
    args_schema=CalculatorInput
)
```

**NO**

```python
from langchain.tools.base import Tool
from pydantic import BaseModel, Field # <-- Uses v2 namespace

class CalculatorInput(BaseModel):
    question: str = Field()

Tool.from_function( # <-- tool uses v1 namespace
    func=lambda question: 'hello',
    name="Calculator",
    description="useful for when you need to answer questions about math",
    args_schema=CalculatorInput
)
```
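
A quick way to check which major version is installed (hedged; `pydantic.VERSION` is present in both v1 and v2):

```python
import pydantic

print(pydantic.VERSION)  # e.g., "1.10.12" (v1) or "2.1.1" (v2)
```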
@@ -0,0 +1,105 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Microsoft SharePoint\n",
"\n",
"> [Microsoft SharePoint](https://en.wikipedia.org/wiki/SharePoint) is a website-based collaboration system, developed by Microsoft, that uses workflow applications, “list” databases, and other web parts and security features to empower business teams to work together.\n",
"\n",
"This notebook covers how to load documents from the [SharePoint Document Library](https://support.microsoft.com/en-us/office/what-is-a-document-library-3b5976dd-65cf-4c9e-bf5a-713c10ca2872). Currently, only docx, doc, and pdf files are supported.\n",
"\n",
"## Prerequisites\n",
"1. Register an application following the [Microsoft identity platform](https://learn.microsoft.com/en-us/azure/active-directory/develop/quickstart-register-app) instructions.\n",
"2. When registration finishes, the Azure portal displays the app registration's Overview pane. You see the Application (client) ID. Also called the `client ID`, this value uniquely identifies your application in the Microsoft identity platform.\n",
"3. During the steps you will be following at **item 1**, you can set the redirect URI as `https://login.microsoftonline.com/common/oauth2/nativeclient`\n",
"4. During the steps you will be following at **item 1**, generate a new password (`client_secret`) under the Application Secrets section.\n",
"5. Follow the instructions at this [document](https://learn.microsoft.com/en-us/azure/active-directory/develop/quickstart-configure-app-expose-web-apis#add-a-scope) to add the following `SCOPES` (`offline_access` and `Sites.Read.All`) to your application.\n",
"6. To retrieve files from your **Document Library**, you will need its ID. To obtain it, you will need the values of `Tenant Name`, `Collection ID`, and `Subsite ID`.\n",
"7. To find your `Tenant Name`, follow the instructions at this [document](https://learn.microsoft.com/en-us/azure/active-directory-b2c/tenant-management-read-tenant-name). Once you have it, just remove `.onmicrosoft.com` from the value and keep the rest as your `Tenant Name`.\n",
"8. To obtain your `Collection ID` and `Subsite ID`, you will need your **SharePoint** `site-name`. Your `SharePoint` site URL has the following format `https://<tenant-name>.sharepoint.com/sites/<site-name>`. The last part of this URL is the `site-name`.\n",
"9. To get the Site `Collection ID`, hit this URL in the browser: `https://<tenant>.sharepoint.com/sites/<site-name>/_api/site/id` and copy the value of the `Edm.Guid` property.\n",
"10. To get the `Subsite ID` (or web ID) use: `https://<tenant>.sharepoint.com/<site-name>/_api/web/id` and copy the value of the `Edm.Guid` property.\n",
"11. The `SharePoint site ID` has the following format: `<tenant-name>.sharepoint.com,<Collection ID>,<subsite ID>`. You can hold that value to use in the next step.\n",
"12. Visit the [Graph Explorer Playground](https://developer.microsoft.com/en-us/graph/graph-explorer) to obtain your `Document Library ID`. The first step is to ensure you are logged in with the account associated with your **SharePoint** site. Then you need to make a request to `https://graph.microsoft.com/v1.0/sites/<SharePoint site ID>/drive` and the response will return a payload with a field `id` that holds your `Document Library ID` (see the sketch below).\n",
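"\n",
"For step 12, here is a hedged sketch of the same Graph call made outside the Explorer (it assumes `TOKEN` holds a valid Microsoft Graph access token for your account):\n",
"\n",
"```python\n",
"import requests\n",
"\n",
"site_id = \"<tenant-name>.sharepoint.com,<Collection ID>,<subsite ID>\"  # from step 11\n",
"resp = requests.get(\n",
"    f\"https://graph.microsoft.com/v1.0/sites/{site_id}/drive\",\n",
"    headers={\"Authorization\": f\"Bearer {TOKEN}\"},\n",
")\n",
"print(resp.json()[\"id\"])  # your Document Library ID\n",
"```\n",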
"\n",
|
||||
"## 🧑 Instructions for ingesting your documents from SharePoint Document Library\n",
|
||||
"\n",
|
||||
"### 🔑 Authentication\n",
|
||||
"\n",
|
||||
"By default, the `SharePointLoader` expects that the values of `CLIENT_ID` and `CLIENT_SECRET` must be stored as environment variables named `O365_CLIENT_ID` and `O365_CLIENT_SECRET` respectively. You could pass those environment variables through a `.env` file at the root of your application or using the following command in your script.\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"os.environ['O365_CLIENT_ID'] = \"YOUR CLIENT ID\"\n",
|
||||
"os.environ['O365_CLIENT_SECRET'] = \"YOUR CLIENT SECRET\"\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"This loader uses an authentication called [*on behalf of a user*](https://learn.microsoft.com/en-us/graph/auth-v2-user?context=graph%2Fapi%2F1.0&view=graph-rest-1.0). It is a 2 step authentication with user consent. When you instantiate the loader, it will call will print a url that the user must visit to give consent to the app on the required permissions. The user must then visit this url and give consent to the application. Then the user must copy the resulting page url and paste it back on the console. The method will then return True if the login attempt was succesful.\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"from langchain.document_loaders.sharepoint import SharePointLoader\n",
|
||||
"\n",
|
||||
"loader = SharePointLoader(document_library_id=\"YOUR DOCUMENT LIBRARY ID\")\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Once the authentication has been done, the loader will store a token (`o365_token.txt`) at `~/.credentials/` folder. This token could be used later to authenticate without the copy/paste steps explained earlier. To use this token for authentication, you need to change the `auth_with_token` parameter to True in the instantiation of the loader.\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"from langchain.document_loaders.sharepoint import SharePointLoader\n",
|
||||
"\n",
|
||||
"loader = SharePointLoader(document_library_id=\"YOUR DOCUMENT LIBRARY ID\", auth_with_token=True)\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"### 🗂️ Documents loader\n",
|
||||
"\n",
|
||||
"#### 📑 Loading documents from a Document Library Directory\n",
|
||||
"\n",
|
||||
"`SharePointLoader` can load documents from a specific folder within your Document Library. For instance, you want to load all documents that are stored at `Documents/marketing` folder within your Document Library.\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"from langchain.document_loaders.sharepoint import SharePointLoader\n",
|
||||
"\n",
|
||||
"loader = SharePointLoader(document_library_id=\"YOUR DOCUMENT LIBRARY ID\", folder_path=\"Documents/marketing\", auth_with_token=True)\n",
|
||||
"documents = loader.load()\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"#### 📑 Loading documents from a list of Documents IDs\n",
|
||||
"\n",
|
||||
"Another possibility is to provide a list of `object_id` for each document you want to load. For that, you will need to query the [Microsoft Graph API](https://developer.microsoft.com/en-us/graph/graph-explorer) to find all the documents ID that you are interested in. This [link](https://learn.microsoft.com/en-us/graph/api/resources/onedrive?view=graph-rest-1.0#commonly-accessed-resources) provides a list of endpoints that will be helpful to retrieve the documents ID.\n",
|
||||
"\n",
|
||||
"For instance, to retrieve information about all objects that are stored at `data/finance/` folder, you need make a request to: `https://graph.microsoft.com/v1.0/drives/<document-library-id>/root:/data/finance:/children`. Once you have the list of IDs that you are interested in, then you can instantiate the loader with the following parameters.\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"from langchain.document_loaders.sharepoint import SharePointLoader\n",
|
||||
"\n",
|
||||
"loader = SharePointLoader(document_library_id=\"YOUR DOCUMENT LIBRARY ID\", object_ids=[\"ID_1\", \"ID_2\"], auth_with_token=True)\n",
|
||||
"documents = loader.load()\n",
|
||||
"```\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.10"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -0,0 +1,878 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1f3cebbe-079a-4bfe-b1a1-07bdac882ce2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Amazon Textract \n",
|
||||
"\n",
|
||||
"Amazon Textract is a machine learning (ML) service that automatically extracts text, handwriting, and data from scanned documents. It goes beyond simple optical character recognition (OCR) to identify, understand, and extract data from forms and tables. Today, many companies manually extract data from scanned documents such as PDFs, images, tables, and forms, or through simple OCR software that requires manual configuration (which often must be updated when the form changes). To overcome these manual and expensive processes, Textract uses ML to read and process any type of document, accurately extracting text, handwriting, tables, and other data with no manual effort. You can quickly automate document processing and act on the information extracted, whether you’re automating loans processing or extracting information from invoices and receipts. Textract can extract the data in minutes instead of hours or days.\n",
|
||||
"\n",
|
||||
"This sample demonstrates the use of Amazon Textract in combination with LangChain as a DocumentLoader.\n",
|
||||
"\n",
|
||||
"Textract supports PDF, TIFF, PNG and JPEG format.\n",
|
||||
"\n",
|
||||
"Check https://docs.aws.amazon.com/textract/latest/dg/limits-document.html for supported document sizes, languages and characters."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "c049beaf-f904-4ce6-91ca-805da62084c2",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.2\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m23.2.1\u001b[0m\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpython -m pip install --upgrade pip\u001b[0m\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"!pip install langchain boto3 openai tiktoken python-dotenv -q"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "400b25c6-befa-4730-a201-39ff112c8858",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Sample 1\n",
|
||||
"\n",
|
||||
"The first example uses a local file, which internally will be send to Amazon Textract sync API [DetectDocumentText](https://docs.aws.amazon.com/textract/latest/dg/API_DetectDocumentText.html). \n",
|
||||
"\n",
|
||||
"Local files or URL endpoints like HTTP:// are limited to one page documents for Textract.\n",
|
||||
"Multi-page documents have to reside on S3. This sample file is a jpeg."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "1becee92-e82f-42d4-9b4e-b23d77cbe88d",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import AmazonTextractPDFLoader\n",
|
||||
"loader = AmazonTextractPDFLoader(\"example_data/alejandro_rosalez_sample-small.jpeg\")\n",
|
||||
"documents = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d566dc56-c9a9-44ec-84fb-a81928f90d40",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Output from the file"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "1272ce8c-d298-4059-ac0a-780bf5f82302",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='Patient Information First Name: ALEJANDRO Last Name: ROSALEZ Date of Birth: 10/10/1982 Sex: M Marital Status: MARRIED Email Address: Address: 123 ANY STREET City: ANYTOWN State: CA Zip Code: 12345 Phone: 646-555-0111 Emergency Contact 1: First Name: CARLOS Last Name: SALAZAR Phone: 212-555-0150 Relationship to Patient: BROTHER Emergency Contact 2: First Name: JANE Last Name: DOE Phone: 650-555-0123 Relationship FRIEND to Patient: Did you feel fever or feverish lately? Yes No Are you having shortness of breath? Yes No Do you have a cough? Yes No Did you experience loss of taste or smell? Yes No Where you in contact with any confirmed COVID-19 positive patients? Yes No Did you travel in the past 14 days to any regions affected by COVID-19? Yes No Patient Information First Name: ALEJANDRO Last Name: ROSALEZ Date of Birth: 10/10/1982 Sex: M Marital Status: MARRIED Email Address: Address: 123 ANY STREET City: ANYTOWN State: CA Zip Code: 12345 Phone: 646-555-0111 Emergency Contact 1: First Name: CARLOS Last Name: SALAZAR Phone: 212-555-0150 Relationship to Patient: BROTHER Emergency Contact 2: First Name: JANE Last Name: DOE Phone: 650-555-0123 Relationship FRIEND to Patient: Did you feel fever or feverish lately? Yes No Are you having shortness of breath? Yes No Do you have a cough? Yes No Did you experience loss of taste or smell? Yes No Where you in contact with any confirmed COVID-19 positive patients? Yes No Did you travel in the past 14 days to any regions affected by COVID-19? Yes No ', metadata={'source': 'example_data/alejandro_rosalez_sample-small.jpeg', 'page': 1})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"documents"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4cf7f19c-3635-453a-9c76-4baf98b8d7f4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Sample 2\n",
|
||||
"The next sample loads a file from an HTTPS endpoint. \n",
|
||||
"It has to be single page, as Amazon Textract requires all multi-page documents to be stored on S3."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "10374bfb-b325-451f-8bd0-c686710ab68c",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import AmazonTextractPDFLoader\n",
|
||||
"loader = AmazonTextractPDFLoader(\"https://amazon-textract-public-content.s3.us-east-2.amazonaws.com/langchain/alejandro_rosalez_sample_1.jpg\")\n",
|
||||
"documents = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "16a2b6a3-7514-4c2c-a427-6847169af473",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='Patient Information First Name: ALEJANDRO Last Name: ROSALEZ Date of Birth: 10/10/1982 Sex: M Marital Status: MARRIED Email Address: Address: 123 ANY STREET City: ANYTOWN State: CA Zip Code: 12345 Phone: 646-555-0111 Emergency Contact 1: First Name: CARLOS Last Name: SALAZAR Phone: 212-555-0150 Relationship to Patient: BROTHER Emergency Contact 2: First Name: JANE Last Name: DOE Phone: 650-555-0123 Relationship FRIEND to Patient: Did you feel fever or feverish lately? Yes No Are you having shortness of breath? Yes No Do you have a cough? Yes No Did you experience loss of taste or smell? Yes No Where you in contact with any confirmed COVID-19 positive patients? Yes No Did you travel in the past 14 days to any regions affected by COVID-19? Yes No Patient Information First Name: ALEJANDRO Last Name: ROSALEZ Date of Birth: 10/10/1982 Sex: M Marital Status: MARRIED Email Address: Address: 123 ANY STREET City: ANYTOWN State: CA Zip Code: 12345 Phone: 646-555-0111 Emergency Contact 1: First Name: CARLOS Last Name: SALAZAR Phone: 212-555-0150 Relationship to Patient: BROTHER Emergency Contact 2: First Name: JANE Last Name: DOE Phone: 650-555-0123 Relationship FRIEND to Patient: Did you feel fever or feverish lately? Yes No Are you having shortness of breath? Yes No Do you have a cough? Yes No Did you experience loss of taste or smell? Yes No Where you in contact with any confirmed COVID-19 positive patients? Yes No Did you travel in the past 14 days to any regions affected by COVID-19? Yes No ', metadata={'source': 'example_data/alejandro_rosalez_sample-small.jpeg', 'page': 1})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"documents"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3a9cd8ec-e663-4dc7-9db1-d2f575253141",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Sample 3\n",
|
||||
"\n",
|
||||
"Processing a multi-page document requires the document to be on S3. The sample document resides in a bucket in us-east-2 and Textract needs to be called in that same region to be successful, so we set the region_name on the client and pass that in to the loader to ensure Textract is called from us-east-2. You could also to have your notebook running in us-east-2, setting the AWS_DEFAULT_REGION set to us-east-2 or when running in a different environment, pass in a boto3 Textract client with that region name like in the cell below."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "8185e3e6-9599-4a47-8969-d6dcef3e6404",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import boto3\n",
|
||||
"textract_client = boto3.client('textract', region_name='us-east-2')\n",
|
||||
"\n",
|
||||
"file_path = \"s3://amazon-textract-public-content/langchain/layout-parser-paper.pdf\"\n",
|
||||
"loader = AmazonTextractPDFLoader(file_path, client=textract_client)\n",
|
||||
"documents = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b8901eec-070d-4fd6-9d65-52211d332441",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now getting the number of pages to validate the response (printing out the full response would be quite long...). We expect 16 pages."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "b23c01c8-cf69-4fe2-8141-4621edb7d79c",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"16"
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"len(documents)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b3e41b4d-b159-4274-89be-80d8159134ef",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using the AmazonTextractPDFLoader in an LangChain chain (e. g. OpenAI)\n",
|
||||
"\n",
|
||||
"The AmazonTextractPDFLoader can be used in a chain the same way the other loaders are used.\n",
|
||||
"Textract itself does have a [Query feature](https://docs.aws.amazon.com/textract/latest/dg/API_Query.html), which offers similar functionality to the QA chain in this sample, which is worth checking out as well."
|
||||
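"\n",
|
||||
"As a rough sketch (the document location and query text below are illustrative assumptions), the Queries feature can be called directly through `boto3`:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"import boto3\n",
|
||||
"\n",
|
||||
"textract = boto3.client(\"textract\", region_name=\"us-east-2\")\n",
|
||||
"response = textract.analyze_document(\n",
|
||||
"    Document={\"S3Object\": {\"Bucket\": \"amazon-textract-public-content\", \"Name\": \"langchain/alejandro_rosalez_sample_1.jpg\"}},\n",
|
||||
"    FeatureTypes=[\"QUERIES\"],\n",
|
||||
"    QueriesConfig={\"Queries\": [{\"Text\": \"Who is the patient?\"}]},\n",
|
||||
")\n",
|
||||
"# Query answers come back as QUERY_RESULT blocks\n",
|
||||
"answers = [b[\"Text\"] for b in response[\"Blocks\"] if b[\"BlockType\"] == \"QUERY_RESULT\"]\n",
|
||||
"```"
|
||||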
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "53c47b24-cc06-4256-9e5b-a82fc80bc55d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# You can store your OPENAI_API_KEY in a .env file as well\n",
|
||||
"# import os \n",
|
||||
"# from dotenv import load_dotenv\n",
|
||||
"\n",
|
||||
"# load_dotenv()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "a9ae004c-246c-4c7f-8458-191cd7424a9b",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Or set the OpenAI key in the environment directly\n",
|
||||
"import os \n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = \"your-OpenAI-API-key\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "d52b089c-10ca-45fb-8669-8a1c5fee10d5",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' The authors are Zejiang Shen, Ruochen Zhang, Melissa Dell, Benjamin Charles Germain Lee, Jacob Carlson, Weining Li, Gardner, M., Grus, J., Neumann, M., Tafjord, O., Dasigi, P., Liu, N., Peters, M., Schmitz, M., Zettlemoyer, L., Lukasz Garncarek, Powalski, R., Stanislawek, T., Topolski, B., Halama, P., Gralinski, F., Graves, A., Fernández, S., Gomez, F., Schmidhuber, J., Harley, A.W., Ufkes, A., Derpanis, K.G., He, K., Gkioxari, G., Dollár, P., Girshick, R., He, K., Zhang, X., Ren, S., Sun, J., Kay, A., Lamiroy, B., Lopresti, D., Mears, J., Jakeway, E., Ferriter, M., Adams, C., Yarasavage, N., Thomas, D., Zwaard, K., Li, M., Cui, L., Huang,'"
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.chains.question_answering import load_qa_chain\n",
|
||||
"\n",
|
||||
"chain = load_qa_chain(llm=OpenAI(), chain_type=\"map_reduce\")\n",
|
||||
"query = [\"Who are the autors?\"]\n",
|
||||
"\n",
|
||||
"chain.run(input_documents=documents, question=query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1a09d18b-ab7b-468e-ae66-f92abf666b9b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"availableInstances": [
|
||||
{
|
||||
"_defaultOrder": 0,
|
||||
"_isFastLaunch": true,
|
||||
"category": "General purpose",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 4,
|
||||
"name": "ml.t3.medium",
|
||||
"vcpuNum": 2
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 1,
|
||||
"_isFastLaunch": false,
|
||||
"category": "General purpose",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 8,
|
||||
"name": "ml.t3.large",
|
||||
"vcpuNum": 2
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 2,
|
||||
"_isFastLaunch": false,
|
||||
"category": "General purpose",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 16,
|
||||
"name": "ml.t3.xlarge",
|
||||
"vcpuNum": 4
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 3,
|
||||
"_isFastLaunch": false,
|
||||
"category": "General purpose",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 32,
|
||||
"name": "ml.t3.2xlarge",
|
||||
"vcpuNum": 8
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 4,
|
||||
"_isFastLaunch": true,
|
||||
"category": "General purpose",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 8,
|
||||
"name": "ml.m5.large",
|
||||
"vcpuNum": 2
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 5,
|
||||
"_isFastLaunch": false,
|
||||
"category": "General purpose",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 16,
|
||||
"name": "ml.m5.xlarge",
|
||||
"vcpuNum": 4
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 6,
|
||||
"_isFastLaunch": false,
|
||||
"category": "General purpose",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 32,
|
||||
"name": "ml.m5.2xlarge",
|
||||
"vcpuNum": 8
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 7,
|
||||
"_isFastLaunch": false,
|
||||
"category": "General purpose",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 64,
|
||||
"name": "ml.m5.4xlarge",
|
||||
"vcpuNum": 16
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 8,
|
||||
"_isFastLaunch": false,
|
||||
"category": "General purpose",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 128,
|
||||
"name": "ml.m5.8xlarge",
|
||||
"vcpuNum": 32
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 9,
|
||||
"_isFastLaunch": false,
|
||||
"category": "General purpose",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 192,
|
||||
"name": "ml.m5.12xlarge",
|
||||
"vcpuNum": 48
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 10,
|
||||
"_isFastLaunch": false,
|
||||
"category": "General purpose",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 256,
|
||||
"name": "ml.m5.16xlarge",
|
||||
"vcpuNum": 64
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 11,
|
||||
"_isFastLaunch": false,
|
||||
"category": "General purpose",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 384,
|
||||
"name": "ml.m5.24xlarge",
|
||||
"vcpuNum": 96
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 12,
|
||||
"_isFastLaunch": false,
|
||||
"category": "General purpose",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 8,
|
||||
"name": "ml.m5d.large",
|
||||
"vcpuNum": 2
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 13,
|
||||
"_isFastLaunch": false,
|
||||
"category": "General purpose",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 16,
|
||||
"name": "ml.m5d.xlarge",
|
||||
"vcpuNum": 4
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 14,
|
||||
"_isFastLaunch": false,
|
||||
"category": "General purpose",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 32,
|
||||
"name": "ml.m5d.2xlarge",
|
||||
"vcpuNum": 8
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 15,
|
||||
"_isFastLaunch": false,
|
||||
"category": "General purpose",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 64,
|
||||
"name": "ml.m5d.4xlarge",
|
||||
"vcpuNum": 16
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 16,
|
||||
"_isFastLaunch": false,
|
||||
"category": "General purpose",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 128,
|
||||
"name": "ml.m5d.8xlarge",
|
||||
"vcpuNum": 32
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 17,
|
||||
"_isFastLaunch": false,
|
||||
"category": "General purpose",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 192,
|
||||
"name": "ml.m5d.12xlarge",
|
||||
"vcpuNum": 48
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 18,
|
||||
"_isFastLaunch": false,
|
||||
"category": "General purpose",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 256,
|
||||
"name": "ml.m5d.16xlarge",
|
||||
"vcpuNum": 64
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 19,
|
||||
"_isFastLaunch": false,
|
||||
"category": "General purpose",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 384,
|
||||
"name": "ml.m5d.24xlarge",
|
||||
"vcpuNum": 96
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 20,
|
||||
"_isFastLaunch": false,
|
||||
"category": "General purpose",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": true,
|
||||
"memoryGiB": 0,
|
||||
"name": "ml.geospatial.interactive",
|
||||
"supportedImageNames": [
|
||||
"sagemaker-geospatial-v1-0"
|
||||
],
|
||||
"vcpuNum": 0
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 21,
|
||||
"_isFastLaunch": true,
|
||||
"category": "Compute optimized",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 4,
|
||||
"name": "ml.c5.large",
|
||||
"vcpuNum": 2
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 22,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Compute optimized",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 8,
|
||||
"name": "ml.c5.xlarge",
|
||||
"vcpuNum": 4
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 23,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Compute optimized",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 16,
|
||||
"name": "ml.c5.2xlarge",
|
||||
"vcpuNum": 8
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 24,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Compute optimized",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 32,
|
||||
"name": "ml.c5.4xlarge",
|
||||
"vcpuNum": 16
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 25,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Compute optimized",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 72,
|
||||
"name": "ml.c5.9xlarge",
|
||||
"vcpuNum": 36
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 26,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Compute optimized",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 96,
|
||||
"name": "ml.c5.12xlarge",
|
||||
"vcpuNum": 48
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 27,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Compute optimized",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 144,
|
||||
"name": "ml.c5.18xlarge",
|
||||
"vcpuNum": 72
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 28,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Compute optimized",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 192,
|
||||
"name": "ml.c5.24xlarge",
|
||||
"vcpuNum": 96
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 29,
|
||||
"_isFastLaunch": true,
|
||||
"category": "Accelerated computing",
|
||||
"gpuNum": 1,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 16,
|
||||
"name": "ml.g4dn.xlarge",
|
||||
"vcpuNum": 4
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 30,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Accelerated computing",
|
||||
"gpuNum": 1,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 32,
|
||||
"name": "ml.g4dn.2xlarge",
|
||||
"vcpuNum": 8
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 31,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Accelerated computing",
|
||||
"gpuNum": 1,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 64,
|
||||
"name": "ml.g4dn.4xlarge",
|
||||
"vcpuNum": 16
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 32,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Accelerated computing",
|
||||
"gpuNum": 1,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 128,
|
||||
"name": "ml.g4dn.8xlarge",
|
||||
"vcpuNum": 32
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 33,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Accelerated computing",
|
||||
"gpuNum": 4,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 192,
|
||||
"name": "ml.g4dn.12xlarge",
|
||||
"vcpuNum": 48
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 34,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Accelerated computing",
|
||||
"gpuNum": 1,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 256,
|
||||
"name": "ml.g4dn.16xlarge",
|
||||
"vcpuNum": 64
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 35,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Accelerated computing",
|
||||
"gpuNum": 1,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 61,
|
||||
"name": "ml.p3.2xlarge",
|
||||
"vcpuNum": 8
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 36,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Accelerated computing",
|
||||
"gpuNum": 4,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 244,
|
||||
"name": "ml.p3.8xlarge",
|
||||
"vcpuNum": 32
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 37,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Accelerated computing",
|
||||
"gpuNum": 8,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 488,
|
||||
"name": "ml.p3.16xlarge",
|
||||
"vcpuNum": 64
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 38,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Accelerated computing",
|
||||
"gpuNum": 8,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 768,
|
||||
"name": "ml.p3dn.24xlarge",
|
||||
"vcpuNum": 96
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 39,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Memory Optimized",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 16,
|
||||
"name": "ml.r5.large",
|
||||
"vcpuNum": 2
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 40,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Memory Optimized",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 32,
|
||||
"name": "ml.r5.xlarge",
|
||||
"vcpuNum": 4
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 41,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Memory Optimized",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 64,
|
||||
"name": "ml.r5.2xlarge",
|
||||
"vcpuNum": 8
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 42,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Memory Optimized",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 128,
|
||||
"name": "ml.r5.4xlarge",
|
||||
"vcpuNum": 16
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 43,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Memory Optimized",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 256,
|
||||
"name": "ml.r5.8xlarge",
|
||||
"vcpuNum": 32
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 44,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Memory Optimized",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 384,
|
||||
"name": "ml.r5.12xlarge",
|
||||
"vcpuNum": 48
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 45,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Memory Optimized",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 512,
|
||||
"name": "ml.r5.16xlarge",
|
||||
"vcpuNum": 64
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 46,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Memory Optimized",
|
||||
"gpuNum": 0,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 768,
|
||||
"name": "ml.r5.24xlarge",
|
||||
"vcpuNum": 96
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 47,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Accelerated computing",
|
||||
"gpuNum": 1,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 16,
|
||||
"name": "ml.g5.xlarge",
|
||||
"vcpuNum": 4
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 48,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Accelerated computing",
|
||||
"gpuNum": 1,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 32,
|
||||
"name": "ml.g5.2xlarge",
|
||||
"vcpuNum": 8
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 49,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Accelerated computing",
|
||||
"gpuNum": 1,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 64,
|
||||
"name": "ml.g5.4xlarge",
|
||||
"vcpuNum": 16
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 50,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Accelerated computing",
|
||||
"gpuNum": 1,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 128,
|
||||
"name": "ml.g5.8xlarge",
|
||||
"vcpuNum": 32
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 51,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Accelerated computing",
|
||||
"gpuNum": 1,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 256,
|
||||
"name": "ml.g5.16xlarge",
|
||||
"vcpuNum": 64
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 52,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Accelerated computing",
|
||||
"gpuNum": 4,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 192,
|
||||
"name": "ml.g5.12xlarge",
|
||||
"vcpuNum": 48
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 53,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Accelerated computing",
|
||||
"gpuNum": 4,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 384,
|
||||
"name": "ml.g5.24xlarge",
|
||||
"vcpuNum": 96
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 54,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Accelerated computing",
|
||||
"gpuNum": 8,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 768,
|
||||
"name": "ml.g5.48xlarge",
|
||||
"vcpuNum": 192
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 55,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Accelerated computing",
|
||||
"gpuNum": 8,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 1152,
|
||||
"name": "ml.p4d.24xlarge",
|
||||
"vcpuNum": 96
|
||||
},
|
||||
{
|
||||
"_defaultOrder": 56,
|
||||
"_isFastLaunch": false,
|
||||
"category": "Accelerated computing",
|
||||
"gpuNum": 8,
|
||||
"hideHardwareSpecs": false,
|
||||
"memoryGiB": 1152,
|
||||
"name": "ml.p4de.24xlarge",
|
||||
"vcpuNum": 96
|
||||
}
|
||||
],
|
||||
"instance_type": "ml.t3.medium",
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -299,7 +299,7 @@
|
||||
"id": "1cf27fc8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you need to post process the `unstructured` elements after extraction, you can pass in a list of `Element` -> `Element` functions to the `post_processors` kwarg when you instantiate the `UnstructuredFileLoader`. This applies to other Unstructured loaders as well. Below is an example. Post processors are only applied if you run the loader in `\"elements\"` mode."
|
||||
"If you need to post process the `unstructured` elements after extraction, you can pass in a list of `str` -> `str` functions to the `post_processors` kwarg when you instantiate the `UnstructuredFileLoader`. This applies to other Unstructured loaders as well. Below is an example."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -495,7 +495,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.13"
|
||||
"version": "3.8.10"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -10,7 +10,7 @@
|
||||
"\n",
|
||||
"This LLM showcases true potential of decentralized AI by giving you the best response(s) from the Bittensor protocol, which consist of various AI models such as OpenAI, LLaMA2 etc.\n",
|
||||
"\n",
|
||||
"Users can view their logs, requests, and API keys on the [Validator Endpoint Frontend](https://api.neuralinterent.ai/). However, changes to the configuration are currently prohibited; otherwise, the user's queries will be blocked.\n",
|
||||
"Users can view their logs, requests, and API keys on the [Validator Endpoint Frontend](https://api.neuralinternet.ai/). However, changes to the configuration are currently prohibited; otherwise, the user's queries will be blocked.\n",
|
||||
"\n",
|
||||
"If you encounter any difficulties or have any questions, please feel free to reach out to our developer on [GitHub](https://github.com/Kunj-2206), [Discord](https://discordapp.com/users/683542109248159777) or join our discord server for latest update and queries [Neural Internet](https://discord.gg/neuralinternet).\n"
|
||||
]
|
||||
|
||||
@@ -32,7 +32,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": null,
|
||||
"id": "d772b637-de00-4663-bd77-9bc96d798db2",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -135,7 +135,7 @@
|
||||
"id": "4c16fded-70d1-42af-8bfa-6ddda9f0bc63",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Flan, by Google"
|
||||
"### `Flan`, by `Google`"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -178,7 +178,7 @@
|
||||
"id": "1a5c97af-89bc-4e59-95c1-223742a9160b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Dolly, by Databricks\n",
|
||||
"### `Dolly`, by `Databricks`\n",
|
||||
"\n",
|
||||
"See [Databricks](https://huggingface.co/databricks) organization page for a list of available models."
|
||||
]
|
||||
@@ -225,14 +225,14 @@
|
||||
"id": "03f6ae52-b5f9-4de6-832c-551cb3fa11ae",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Camel, by Writer\n",
|
||||
"### `Camel`, by `Writer`\n",
|
||||
"\n",
|
||||
"See [Writer's](https://huggingface.co/Writer) organization page for a list of available models."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"execution_count": 11,
|
||||
"id": "257a091d-750b-4910-ac08-fe1c7b3fd98b",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -261,7 +261,7 @@
|
||||
"id": "2bf838eb-1083-402f-b099-b07c452418c8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### XGen, by Salesforce\n",
|
||||
"### `XGen`, by `Salesforce`\n",
|
||||
"\n",
|
||||
"See [more information](https://github.com/salesforce/xgen)."
|
||||
]
|
||||
@@ -295,7 +295,7 @@
|
||||
"id": "0aca9f9e-f333-449c-97b2-10d1dbf17e75",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Falcon, by Technology Innovation Institute (TII)\n",
|
||||
"### `Falcon`, by `Technology Innovation Institute (TII)`\n",
|
||||
"\n",
|
||||
"See [more information](https://huggingface.co/tiiuae/falcon-40b)."
|
||||
]
|
||||
@@ -323,6 +323,86 @@
|
||||
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
|
||||
"print(llm_chain.run(question))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7e15849b-5561-4bb9-86ec-6412ca10196a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### `InternLM-Chat`, by `Shanghai AI Laboratory`\n",
|
||||
"\n",
|
||||
"See [more information](https://huggingface.co/internlm/internlm-7b)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "3b533461-59f8-406e-907b-000841fa60a7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"repo_id = \"internlm/internlm-chat-7b\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c71210b9-5895-41a2-889a-f430d22fa1aa",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = HuggingFaceHub(\n",
|
||||
" repo_id=repo_id, model_kwargs={\"max_length\": 128, \"temperature\": 0.8}\n",
|
||||
")\n",
|
||||
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
|
||||
"print(llm_chain.run(question))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4f2e5132-1713-42d7-919a-8c313744ce95",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### `Qwen`, by `Alibaba Cloud`\n",
|
||||
"\n",
|
||||
">`Tongyi Qianwen-7B` (`Qwen-7B`) is a model with a scale of 7 billion parameters in the `Tongyi Qianwen` large model series developed by `Alibaba Cloud`. `Qwen-7B` is a large language model based on Transformer, which is trained on ultra-large-scale pre-training data.\n",
|
||||
"\n",
|
||||
"See [more information on HuggingFace](https://huggingface.co/Qwen/Qwen-7B) of on [GitHub](https://github.com/QwenLM/Qwen-7B).\n",
|
||||
"\n",
|
||||
"See here a [big example for LangChain integration and Qwen](https://github.com/QwenLM/Qwen-7B/blob/main/examples/langchain_tooluse.ipynb)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "f598b1ca-77c7-40f1-a83f-c21ea9910c88",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"repo_id = \"Qwen/Qwen-7B\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2c97f4e2-d401-44fb-9da7-b60b2e2cc663",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = HuggingFaceHub(\n",
|
||||
" repo_id=repo_id, model_kwargs={\"max_length\": 128, \"temperature\": 0.5}\n",
|
||||
")\n",
|
||||
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
|
||||
"print(llm_chain.run(question))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1dd67c1e-1efc-4def-bde4-2e5265725303",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -341,7 +421,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
"version": "3.10.12"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -21,19 +21,19 @@
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"To use, you should have the ``transformers`` python [package installed](https://pypi.org/project/transformers/)."
|
||||
"To use, you should have the ``transformers`` python [package installed](https://pypi.org/project/transformers/), as well as [pytorch](https://pytorch.org/get-started/locally/). You can also install `xformer` for a more memory-efficient attention implementation."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": null,
|
||||
"id": "d772b637-de00-4663-bd77-9bc96d798db2",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install transformers > /dev/null"
|
||||
"%pip install transformers --quiet"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -46,22 +46,14 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 6,
|
||||
"id": "165ae236-962a-4763-8052-c4836d78a5d2",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"WARNING:root:Failed to default session, using empty session: HTTPConnectionPool(host='localhost', port=8000): Max retries exceeded with url: /sessions (Caused by NewConnectionError('<urllib3.connection.HTTPConnection object at 0x1117f9790>: Failed to establish a new connection: [Errno 61] Connection refused'))\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain import HuggingFacePipeline\n",
|
||||
"from langchain.llms import HuggingFacePipeline\n",
|
||||
"\n",
|
||||
"llm = HuggingFacePipeline.from_model_id(\n",
|
||||
" model_id=\"bigscience/bloom-1b7\",\n",
|
||||
@@ -75,24 +67,18 @@
|
||||
"id": "00104b27-0c15-4a97-b198-4512337ee211",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Integrate the model in an LLMChain"
|
||||
"### Create Chain\n",
|
||||
"\n",
|
||||
"With the model loaded into memory, you can compose it with a prompt to\n",
|
||||
"form a chain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 7,
|
||||
"id": "3acf0069",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/Users/wfh/code/lc/lckg/.venv/lib/python3.11/site-packages/transformers/generation/utils.py:1288: UserWarning: Using `max_length`'s default (64) to control the generation length. This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we recommend using `max_new_tokens` to control the maximum length of the generation.\n",
|
||||
" warnings.warn(\n",
|
||||
"WARNING:root:Failed to persist run: HTTPConnectionPool(host='localhost', port=8000): Max retries exceeded with url: /chain-runs (Caused by NewConnectionError('<urllib3.connection.HTTPConnection object at 0x144d06910>: Failed to establish a new connection: [Errno 61] Connection refused'))\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
@@ -102,27 +88,19 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain import PromptTemplate, LLMChain\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"\n",
|
||||
"template = \"\"\"Question: {question}\n",
|
||||
"\n",
|
||||
"Answer: Let's think step by step.\"\"\"\n",
|
||||
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
|
||||
"prompt = PromptTemplate.from_template(template)\n",
|
||||
"\n",
|
||||
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
|
||||
"chain = prompt | llm\n",
|
||||
"\n",
|
||||
"question = \"What is electroencephalography?\"\n",
|
||||
"\n",
|
||||
"print(llm_chain.run(question))"
|
||||
"print(chain.invoke({\"question\": question}))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "843a3837",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
@@ -74,7 +74,7 @@
|
||||
" typical_p=0.95,\n",
|
||||
" temperature=0.01,\n",
|
||||
" repetition_penalty=1.03,\n",
|
||||
" stream=True\n",
|
||||
" streaming=True\n",
|
||||
")\n",
|
||||
"llm(\"What did foo say about bar?\", callbacks=[StreamingStdOutCallbackHandler()])"
|
||||
]
|
||||
|
||||
214
docs/extras/integrations/llms/promptguard.ipynb
Normal file
214
docs/extras/integrations/llms/promptguard.ipynb
Normal file
@@ -0,0 +1,214 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# PromptGuard\n",
|
||||
"\n",
|
||||
"[PromptGuard](https://promptguard.readthedocs.io/en/latest/) is a service that enables applications to leverage the power of language models without compromising user privacy. Designed for composability and ease of integration into existing applications and services, PromptGuard is consumable via a simple Python library as well as through LangChain. Perhaps more importantly, PromptGuard leverages the power of [confidential computing](https://en.wikipedia.org/wiki/Confidential_computing) to ensure that even the PromptGuard service itself cannot access the data it is protecting.\n",
|
||||
" \n",
|
||||
"\n",
|
||||
"This notebook goes over how to use LangChain to interact with `PromptGuard`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# install the promptguard and langchain packages\n",
|
||||
"! pip install promptguard langchain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Accessing the PromptGuard API requires an API key, which you can get by creating an account on [the PromptGuard website](https://promptguard.opaque.co/). Once you have an account, you can find your API key on [the API Keys page](https://promptguard.opaque.co/api-keys)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"# Set API keys\n",
|
||||
"\n",
|
||||
"os.environ['PROMPT_GUARD_API_KEY'] = \"<PROMPT_GUARD_API_KEY>\"\n",
|
||||
"os.environ['OPENAI_API_KEY'] = \"<OPENAI_API_KEY>\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Use PromptGuardLLMWrapper\n",
|
||||
"\n",
|
||||
"Applying promptguard to your application could be as simple as wrapping your LLM using the PromptGuardLLMWrapper class by replace `llm=OpenAI()` with `llm=PromptGuardLLMWrapper(OpenAI())`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import langchain\n",
|
||||
"from langchain import LLMChain, PromptTemplate\n",
|
||||
"from langchain.callbacks.stdout import StdOutCallbackHandler\n",
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.memory import ConversationBufferWindowMemory\n",
|
||||
"\n",
|
||||
"from langchain.llms import PromptGuardLLMWrapper\n",
|
||||
"\n",
|
||||
"langchain.verbose = True\n",
|
||||
"langchain.debug = True\n",
|
||||
"\n",
|
||||
"prompt_template = \"\"\"\n",
|
||||
"As an AI assistant, you will answer questions according to given context.\n",
|
||||
"\n",
|
||||
"Sensitive personal information in the question is masked for privacy.\n",
|
||||
"For instance, if the original text says \"Giana is good,\" it will be changed\n",
|
||||
"to \"PERSON_998 is good.\" \n",
|
||||
"\n",
|
||||
"Here's how to handle these changes:\n",
|
||||
"* Consider these masked phrases just as placeholders, but still refer to\n",
|
||||
"them in a relevant way when answering.\n",
|
||||
"* It's possible that different masked terms might mean the same thing.\n",
|
||||
"Stick with the given term and don't modify it.\n",
|
||||
"* All masked terms follow the \"TYPE_ID\" pattern.\n",
|
||||
"* Please don't invent new masked terms. For instance, if you see \"PERSON_998,\"\n",
|
||||
"don't come up with \"PERSON_997\" or \"PERSON_999\" unless they're already in the question.\n",
|
||||
"\n",
|
||||
"Conversation History: ```{history}```\n",
|
||||
"Context : ```During our recent meeting on February 23, 2023, at 10:30 AM,\n",
|
||||
"John Doe provided me with his personal details. His email is johndoe@example.com\n",
|
||||
"and his contact number is 650-456-7890. He lives in New York City, USA, and\n",
|
||||
"belongs to the American nationality with Christian beliefs and a leaning towards\n",
|
||||
"the Democratic party. He mentioned that he recently made a transaction using his\n",
|
||||
"credit card 4111 1111 1111 1111 and transferred bitcoins to the wallet address\n",
|
||||
"1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa. While discussing his European travels, he noted\n",
|
||||
"down his IBAN as GB29 NWBK 6016 1331 9268 19. Additionally, he provided his website\n",
|
||||
"as https://johndoeportfolio.com. John also discussed some of his US-specific details.\n",
|
||||
"He said his bank account number is 1234567890123456 and his drivers license is Y12345678.\n",
|
||||
"His ITIN is 987-65-4321, and he recently renewed his passport, the number for which is\n",
|
||||
"123456789. He emphasized not to share his SSN, which is 123-45-6789. Furthermore, he\n",
|
||||
"mentioned that he accesses his work files remotely through the IP 192.168.1.1 and has\n",
|
||||
"a medical license number MED-123456. ```\n",
|
||||
"Question: ```{question}```\n",
|
||||
"\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"chain = LLMChain(\n",
|
||||
" prompt=PromptTemplate.from_template(prompt_template),\n",
|
||||
" llm=PromptGuardLLMWrapper(llm=OpenAI()),\n",
|
||||
" memory=ConversationBufferWindowMemory(k=2),\n",
|
||||
" verbose=True,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"print(\n",
|
||||
" chain.run(\n",
|
||||
" {\"question\": \"\"\"Write a message to remind John to do password reset for his website to stay secure.\"\"\"},\n",
|
||||
" callbacks=[StdOutCallbackHandler()],\n",
|
||||
" )\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"From the output, you can see the following context from user input has sensitive data.\n",
|
||||
"\n",
|
||||
"``` \n",
|
||||
"# Context from user input\n",
|
||||
"\n",
|
||||
"During our recent meeting on February 23, 2023, at 10:30 AM, John Doe provided me with his personal details. His email is johndoe@example.com and his contact number is 650-456-7890. He lives in New York City, USA, and belongs to the American nationality with Christian beliefs and a leaning towards the Democratic party. He mentioned that he recently made a transaction using his credit card 4111 1111 1111 1111 and transferred bitcoins to the wallet address 1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa. While discussing his European travels, he noted down his IBAN as GB29 NWBK 6016 1331 9268 19. Additionally, he provided his website as https://johndoeportfolio.com. John also discussed some of his US-specific details. He said his bank account number is 1234567890123456 and his drivers license is Y12345678. His ITIN is 987-65-4321, and he recently renewed his passport, the number for which is 123456789. He emphasized not to share his SSN, which is 669-45-6789. Furthermore, he mentioned that he accesses his work files remotely through the IP 192.168.1.1 and has a medical license number MED-123456.\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"PromptGuard will automatically detect the sensitive data and replace it with a placeholder. \n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"# Context after PromptGuard\n",
|
||||
"\n",
|
||||
"During our recent meeting on DATE_TIME_3, at DATE_TIME_2, PERSON_3 provided me with his personal details. His email is EMAIL_ADDRESS_1 and his contact number is PHONE_NUMBER_1. He lives in LOCATION_3, LOCATION_2, and belongs to the NRP_3 nationality with NRP_2 beliefs and a leaning towards the Democratic party. He mentioned that he recently made a transaction using his credit card CREDIT_CARD_1 and transferred bitcoins to the wallet address CRYPTO_1. While discussing his NRP_1 travels, he noted down his IBAN as IBAN_CODE_1. Additionally, he provided his website as URL_1. PERSON_2 also discussed some of his LOCATION_1-specific details. He said his bank account number is US_BANK_NUMBER_1 and his drivers license is US_DRIVER_LICENSE_2. His ITIN is US_ITIN_1, and he recently renewed his passport, the number for which is DATE_TIME_1. He emphasized not to share his SSN, which is US_SSN_1. Furthermore, he mentioned that he accesses his work files remotely through the IP IP_ADDRESS_1 and has a medical license number MED-US_DRIVER_LICENSE_1.\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Placeholder is used in the LLM response.\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"# response returned by LLM\n",
|
||||
"\n",
|
||||
"Hey PERSON_1, just wanted to remind you to do a password reset for your website URL_1 through your email EMAIL_ADDRESS_1. It's important to stay secure online, so don't forget to do it!\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Response is desanitized by replacing the placeholder with the original sensitive data.\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"# desanitized LLM response from PromptGuard\n",
|
||||
"\n",
|
||||
"Hey John, just wanted to remind you to do a password reset for your website https://johndoeportfolio.com through your email johndoe@example.com. It's important to stay secure online, so don't forget to do it!\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Use PromptGuard in LangChain expression\n",
|
||||
"\n",
|
||||
"There are functions that can be used with LangChain expression as well if a drop-in replacement doesn't offer the flexibility you need. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import langchain.utilities.promptguard as pgf\n",
|
||||
"from langchain.schema.runnable import RunnableMap\n",
|
||||
"from langchain.schema.output_parser import StrOutputParser\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"prompt=PromptTemplate.from_template(prompt_template), \n",
|
||||
"llm = OpenAI()\n",
|
||||
"pg_chain = (\n",
|
||||
" pgf.sanitize\n",
|
||||
" | RunnableMap(\n",
|
||||
" {\n",
|
||||
" \"response\": (lambda x: x[\"sanitized_input\"])\n",
|
||||
" | prompt\n",
|
||||
" | llm\n",
|
||||
" | StrOutputParser(),\n",
|
||||
" \"secure_context\": lambda x: x[\"secure_context\"],\n",
|
||||
" }\n",
|
||||
" )\n",
|
||||
" | (lambda x: pgf.desanitize(x[\"response\"], x[\"secure_context\"]))\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"pg_chain.invoke({\"question\": \"Write a text message to remind John to do password reset for his website through his email to stay secure.\", \"history\": \"\"})"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "langchain",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python",
|
||||
"version": "3.10.10"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -26,7 +26,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
@@ -61,6 +61,71 @@
|
||||
"\n",
|
||||
"llm_chain.run(question)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Streaming Version"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You should install websocket-client to use this feature.\n",
|
||||
"`pip install websocket-client`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model_url = \"ws://localhost:5005\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import langchain\n",
|
||||
"from langchain import PromptTemplate, LLMChain\n",
|
||||
"from langchain.llms import TextGen\n",
|
||||
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
|
||||
"\n",
|
||||
"langchain.debug = True\n",
|
||||
"\n",
|
||||
"template = \"\"\"Question: {question}\n",
|
||||
"\n",
|
||||
"Answer: Let's think step by step.\"\"\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
|
||||
"llm = TextGen(model_url=model_url, streaming=True, callbacks=[StreamingStdOutCallbackHandler()])\n",
|
||||
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
|
||||
"question = \"What NFL team won the Super Bowl in the year Justin Bieber was born?\"\n",
|
||||
"\n",
|
||||
"llm_chain.run(question)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = TextGen(\n",
|
||||
" model_url = model_url,\n",
|
||||
" streaming=True\n",
|
||||
")\n",
|
||||
"for chunk in llm.stream(\"Ask 'Hi, how are you?' like a pirate:'\",\n",
|
||||
" stop=[\"'\",\"\\n\"]):\n",
|
||||
" print(chunk, end='', flush=True)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -79,7 +144,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.7"
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -37,7 +37,7 @@ There is a Clarifai Embedding model in LangChain, which you can access with:
|
||||
from langchain.embeddings import ClarifaiEmbeddings
|
||||
embeddings = ClarifaiEmbeddings(pat=CLARIFAI_PAT, user_id=USER_ID, app_id=APP_ID, model_id=MODEL_ID)
|
||||
```
|
||||
For more details, the docs on the Clarifai Embeddings wrapper provide a [detailed walthrough](/docs/integrations/text_embedding/clarifai.html).
|
||||
For more details, the docs on the Clarifai Embeddings wrapper provide a [detailed walkthrough](/docs/integrations/text_embedding/clarifai.html).
|
||||
|
||||
## Vectorstore
|
||||
|
||||
@@ -49,4 +49,4 @@ You can also add data directly from LangChain as well, and the auto-indexing will
|
||||
from langchain.vectorstores import Clarifai
|
||||
clarifai_vector_db = Clarifai.from_texts(user_id=USER_ID, app_id=APP_ID, texts=texts, pat=CLARIFAI_PAT, number_of_docs=NUMBER_OF_DOCS, metadatas = metadatas)
|
||||
```
|
||||
For more details, the docs on the Clarifai vector store provide a [detailed walthrough](/docs/integrations/text_embedding/clarifai.html).
|
||||
For more details, the docs on the Clarifai vector store provide a [detailed walkthrough](/docs/integrations/text_embedding/clarifai.html).
|
||||
|
||||
23
docs/extras/integrations/providers/epsilla.mdx
Normal file
23
docs/extras/integrations/providers/epsilla.mdx
Normal file
@@ -0,0 +1,23 @@
|
||||
# Epsilla
|
||||
|
||||
This page covers how to use [Epsilla](https://github.com/epsilla-cloud/vectordb) within LangChain.
|
||||
It is broken into two parts: installation and setup, and then references to specific Epsilla wrappers.
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
- Install the Python SDK with `pip/pip3 install pyepsilla`
|
||||
|
||||
## Wrappers
|
||||
|
||||
### VectorStore
|
||||
|
||||
There exists a wrapper around Epsilla vector databases, allowing you to use it as a vectorstore,
|
||||
whether for semantic search or example selection.
|
||||
|
||||
To import this vectorstore:
|
||||
|
||||
```python
|
||||
from langchain.vectorstores import Epsilla
|
||||
```
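
A minimal usage sketch, mirroring the Epsilla notebook elsewhere in these docs (the connection defaults, paths, and names below are illustrative assumptions, not requirements):

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Epsilla
from pyepsilla import vectordb

# Connect to a running Epsilla instance (default host "localhost", port 8888)
client = vectordb.Client()
vector_store = Epsilla.from_texts(
    ["Epsilla is a vector database"],
    OpenAIEmbeddings(),
    client,
    db_path="/tmp/mypath",
    db_name="MyDB",
    collection_name="MyCollection",
)
print(vector_store.similarity_search("vector database", k=1)[0].page_content)
```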
|
||||
|
||||
For a more detailed walkthrough of the Epsilla wrapper, see [this notebook](/docs/integrations/vectorstores/epsilla.html)
|
||||
@@ -130,9 +130,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"USER_ID = \"openai\"\n",
|
||||
"APP_ID = \"embed\"\n",
|
||||
"MODEL_ID = \"text-embedding-ada\"\n",
|
||||
"USER_ID = \"salesforce\"\n",
|
||||
"APP_ID = \"blip\"\n",
|
||||
"MODEL_ID = \"multimodal-embedder-blip-2\"\n",
|
||||
"\n",
|
||||
"# You can provide a specific model version as the model_version_id arg.\n",
|
||||
"# MODEL_VERSION_ID = \"MODEL_VERSION_ID\""
|
||||
|
||||
60
docs/extras/integrations/text_embedding/ernie.ipynb
Normal file
@@ -0,0 +1,60 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ERNIE Embedding-V1\n",
|
||||
"\n",
|
||||
"[ERNIE Embedding-V1](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/alj562vvu) is a text representation model based on Baidu Wenxin's large-scale model technology, \n",
|
||||
"which converts text into a vector form represented by numerical values, and is used in text retrieval, information recommendation, knowledge mining and other scenarios."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings import ErnieEmbeddings"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"embeddings = ErnieEmbeddings()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query_result = embeddings.embed_query(\"foo\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"doc_results = embeddings.embed_documents([\"foo\"])"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -53,7 +53,15 @@
|
||||
"execution_count": 1,
|
||||
"id": "c1e38361-c1fe-4ac6-86e9-c90ebaf7ae87",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdin",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
" ········\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Please login and get your API key from https://clarifai.com/settings/security\n",
|
||||
"from getpass import getpass\n",
|
||||
@@ -61,18 +69,9 @@
|
||||
"CLARIFAI_PAT = getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "320af802-9271-46ee-948f-d2453933d44b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We want to use `OpenAIEmbeddings` so we have to get the OpenAI API Key."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 6,
|
||||
"id": "aac9563e",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -99,7 +98,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 2,
|
||||
"id": "4d853395",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -134,7 +133,7 @@
|
||||
" \"I love playing soccer with my friends\",\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"metadatas = [{\"id\": i, \"text\": text} for i, text in enumerate(texts)]"
|
||||
"metadatas = [{\"id\": i, \"text\": text, \"source\": \"book 1\", \"category\": [\"books\", \"modern\"]} for i, text in enumerate(texts)]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -156,21 +155,17 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": null,
|
||||
"id": "e755cdce",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='I really enjoy spending time with you', metadata={'text': 'I really enjoy spending time with you', 'id': 0.0}),\n",
|
||||
" Document(page_content='I went to the movies yesterday', metadata={'text': 'I went to the movies yesterday', 'id': 3.0}),\n",
|
||||
" Document(page_content='zab', metadata={'page': '2'}),\n",
|
||||
" Document(page_content='zab', metadata={'page': '2'})]"
|
||||
"[Document(page_content='I really enjoy spending time with you', metadata={'text': 'I really enjoy spending time with you', 'id': 0.0, 'source': 'book 1', 'category': ['books', 'modern']}),\n",
|
||||
" Document(page_content='I went to the movies yesterday', metadata={'text': 'I went to the movies yesterday', 'id': 3.0, 'source': 'book 1', 'category': ['books', 'modern']})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
@@ -179,6 +174,21 @@
|
||||
"docs"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "140103ec-0936-454a-9f4a-7d5beefc138f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# There is lots powerful filtering you can do within an app by leveraging metadata filters. \n",
|
||||
"# This one will limit the similarity query to only the texts that have key of \"source\" matching value of \"book 1\"\n",
|
||||
"book1_similar_docs = clarifai_vector_db.similarity_search(\"I would love to see you\", filter={\"source\": \"book 1\"})\n",
|
||||
"\n",
|
||||
"# you can also use lists in the input's metadata and then select things that match an item in the list. This is useful for categories like below:\n",
|
||||
"book_category_similar_docs = clarifai_vector_db.similarity_search(\"I would love to see you\", filter={\"category\": [\"books\"]})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
@@ -249,7 +259,7 @@
|
||||
" user_id=USER_ID,\n",
|
||||
" app_id=APP_ID,\n",
|
||||
" documents=docs,\n",
|
||||
" pat=CLARIFAI_PAT_KEY,\n",
|
||||
" pat=CLARIFAI_PAT,\n",
|
||||
" number_of_docs=NUMBER_OF_DOCS,\n",
|
||||
")"
|
||||
]
|
||||
@@ -278,6 +288,55 @@
|
||||
"docs = clarifai_vector_db.similarity_search(\"Texts related to criminals and violence\")\n",
|
||||
"docs"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "7b332ca4-416b-4ea6-99da-b6949f399d72",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## From existing App\n",
|
||||
"Within Clarifai we have great tools for adding data to applications (essentially projects) via API or UI. Most users will already have done that before interacting with LangChain so this example will use the data in an existing app to perform searches. Check out our [API docs](https://docs.clarifai.com/api-guide/data/create-get-update-delete) and [UI docs](https://docs.clarifai.com/portal-guide/data). The Clarifai Application can then be used for semantic search to find relevant documents."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "807c1141-591b-436d-abaa-f2c325e66d39",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"USER_ID = \"USERNAME_ID\"\n",
|
||||
"APP_ID = \"APPLICATION_ID\"\n",
|
||||
"NUMBER_OF_DOCS = 4"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "762d74ef-f7df-43d6-b121-4980c4059fc0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"clarifai_vector_db = Clarifai(\n",
|
||||
" user_id=USER_ID,\n",
|
||||
" app_id=APP_ID,\n",
|
||||
" documents=docs,\n",
|
||||
" pat=CLARIFAI_PAT,\n",
|
||||
" number_of_docs=NUMBER_OF_DOCS,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f7636b0f-68ab-4b8f-ba0f-3c27061e3631",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docs = clarifai_vector_db.similarity_search(\"Texts related to criminals and violence\")\n",
|
||||
"docs"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
160
docs/extras/integrations/vectorstores/epsilla.ipynb
Normal file
@@ -0,0 +1,160 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Epsilla\n",
|
||||
"\n",
|
||||
">[Epsilla](https://www.epsilla.com) is an open-source vector database that leverages the advanced parallel graph traversal techniques for vector indexing. Epsilla is licensed under GPL-3.0.\n",
|
||||
"\n",
|
||||
"This notebook shows how to use the functionalities related to the `Epsilla` vector database.\n",
|
||||
"\n",
|
||||
"As a prerequisite, you need to have a running Epsilla vector database (for example, through our docker image), and install the ``pyepsilla`` package. View full docs at [docs](https://epsilla-inc.gitbook.io/epsilladb/quick-start)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip/pip3 install pyepsilla"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We want to use OpenAIEmbeddings so we have to get the OpenAI API Key. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"OpenAI API Key: ········"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain.vectorstores import Epsilla"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"\n",
|
||||
"loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n",
|
||||
"documents = loader.load()\n",
|
||||
"\n",
|
||||
"documents = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0).split_documents(documents)\n",
|
||||
"\n",
|
||||
"embeddings = OpenAIEmbeddings()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Epsilla vectordb is running with default host \"localhost\" and port \"8888\". We have a custom db path, db name and collection name instead of the default ones."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from pyepsilla import vectordb\n",
|
||||
"\n",
|
||||
"client = vectordb.Client()\n",
|
||||
"vector_store = Epsilla.from_documents(\n",
|
||||
" documents,\n",
|
||||
" embeddings,\n",
|
||||
" client,\n",
|
||||
" db_path=\"/tmp/mypath\",\n",
|
||||
" db_name=\"MyDB\",\n",
|
||||
" collection_name=\"MyCollection\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"docs = vector_store.similarity_search(query)\n",
|
||||
"print(docs[0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"In state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections.\n",
|
||||
"\n",
|
||||
"We cannot let this happen.\n",
|
||||
"\n",
|
||||
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections.\n",
|
||||
"\n",
|
||||
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service.\n",
|
||||
"\n",
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court.\n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "langchain",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.17"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -15,7 +15,7 @@
|
||||
"- destination_chains: chains that the router chain can route to\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"In this notebook we will focus on the different types of routing chains. We will show these routing chains used in a `MultiPromptChain` to create a question-answering chain that selects the prompt which is most relevant for a given question, and then answers the question using that prompt."
|
||||
"In this notebook, we will focus on the different types of routing chains. We will show these routing chains used in a `MultiPromptChain` to create a question-answering chain that selects the prompt which is most relevant for a given question, and then answers the question using that prompt."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -195,7 +195,7 @@
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new MultiPromptChain chain...\u001b[0m\n",
|
||||
"math: {'input': 'What is the first prime number greater than 40 such that one plus the prime number is divisible by 3'}\n",
|
||||
"math: {'input': 'What is the first prime number greater than 40 such that one plus the prime number is divisible by 3?'}\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"?\n",
|
||||
"\n",
|
||||
@@ -206,7 +206,7 @@
|
||||
"source": [
|
||||
"print(\n",
|
||||
" chain.run(\n",
|
||||
" \"What is the first prime number greater than 40 such that one plus the prime number is divisible by 3\"\n",
|
||||
" \"What is the first prime number greater than 40 such that one plus the prime number is divisible by 3?\"\n",
|
||||
" )\n",
|
||||
")"
|
||||
]
|
||||
@@ -231,7 +231,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(chain.run(\"What is the name of the type of cloud that rins\"))"
|
||||
"print(chain.run(\"What is the name of the type of cloud that rains?\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -342,7 +342,7 @@
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new MultiPromptChain chain...\u001b[0m\n",
|
||||
"math: {'input': 'What is the first prime number greater than 40 such that one plus the prime number is divisible by 3'}\n",
|
||||
"math: {'input': 'What is the first prime number greater than 40 such that one plus the prime number is divisible by 3?'}\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"?\n",
|
||||
"\n",
|
||||
@@ -353,7 +353,7 @@
|
||||
"source": [
|
||||
"print(\n",
|
||||
" chain.run(\n",
|
||||
" \"What is the first prime number greater than 40 such that one plus the prime number is divisible by 3\"\n",
|
||||
" \"What is the first prime number greater than 40 such that one plus the prime number is divisible by 3?\"\n",
|
||||
" )\n",
|
||||
")"
|
||||
]
|
||||
|
||||
@@ -72,7 +72,7 @@
|
||||
"source": [
|
||||
"## Retrieving Full Documents\n",
|
||||
"\n",
|
||||
"In this mode, we want to retrieve the full documents. Therefor, we only specify a child splitter."
|
||||
"In this mode, we want to retrieve the full documents. Therefore, we only specify a child splitter."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -106,7 +106,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever.add_documents(docs)"
|
||||
"retriever.add_documents(docs, ids=None)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -144,7 +144,7 @@
|
||||
"id": "f895d62b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's now call the vectorstore search functionality - we should see that it returns small chunks (since we're storing the small chunks"
|
||||
"Let's now call the vectorstore search functionality - we should see that it returns small chunks (since we're storing the small chunks)."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -432,7 +432,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
"version": "3.10.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -544,7 +544,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"prompt_template = FeatureformPrompTemplate(input_variables=[\"user_id\"])"
|
||||
"prompt_template = FeatureformPromptTemplate(input_variables=[\"user_id\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -282,7 +282,7 @@
|
||||
"user_agent.reset()\n",
|
||||
"\n",
|
||||
"# Initialize chats\n",
|
||||
"assistant_msg = HumanMessage(\n",
|
||||
"user_msg = HumanMessage(\n",
|
||||
" content=(\n",
|
||||
" f\"{user_sys_msg.content}. \"\n",
|
||||
" \"Now start to give me introductions one by one. \"\n",
|
||||
@@ -290,8 +290,8 @@
|
||||
" )\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"user_msg = HumanMessage(content=f\"{assistant_sys_msg.content}\")\n",
|
||||
"user_msg = assistant_agent.step(user_msg)"
|
||||
"assistant_msg = HumanMessage(content=f\"{assistant_sys_msg.content}\")\n",
|
||||
"assistant_msg = assistant_agent.step(user_msg)"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -282,7 +282,7 @@
|
||||
"user_agent.reset()\n",
|
||||
"\n",
|
||||
"# Initialize chats\n",
|
||||
"assistant_msg = HumanMessage(\n",
|
||||
"user_msg = HumanMessage(\n",
|
||||
" content=(\n",
|
||||
" f\"{user_sys_msg.content}. \"\n",
|
||||
" \"Now start to give me introductions one by one. \"\n",
|
||||
@@ -290,8 +290,8 @@
|
||||
" )\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"user_msg = HumanMessage(content=f\"{assistant_sys_msg.content}\")\n",
|
||||
"user_msg = assistant_agent.step(user_msg)"
|
||||
"assistant_msg = HumanMessage(content=f\"{assistant_sys_msg.content}\")\n",
|
||||
"assistant_msg = assistant_agent.step(user_msg)"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -123,7 +123,7 @@
|
||||
"wikidata_user_agent_header = (\n",
|
||||
" None\n",
|
||||
" if not config.has_section(\"WIKIDATA\")\n",
|
||||
" else config[\"WIKIDATA\"][\"WIKIDAtA_USER_AGENT_HEADER\"]\n",
|
||||
" else config[\"WIKIDATA\"][\"WIKIDATA_USER_AGENT_HEADER\"]\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -35,7 +35,7 @@
|
||||
"source": [
|
||||
"## Quickstart \n",
|
||||
"\n",
|
||||
"Many APIs already are compatible with OpenAI function calling.\n",
|
||||
"Many APIs are already compatible with OpenAI function calling.\n",
|
||||
"\n",
|
||||
"For example, [Klarna](https://www.klarna.com/international/press/klarna-brings-smoooth-shopping-to-chatgpt/) has a YAML file that describes its API and allows OpenAI to interact with it:\n",
|
||||
"\n",
|
||||
@@ -145,7 +145,7 @@
|
||||
"source": [
|
||||
"## Functions \n",
|
||||
"\n",
|
||||
"We can unpack what is hapening when we use the funtions to calls external APIs.\n",
|
||||
"We can unpack what is hapening when we use the functions to calls external APIs.\n",
|
||||
"\n",
|
||||
"Let's look at the [LangSmith trace](https://smith.langchain.com/public/76a58b85-193f-4eb7-ba40-747f0d5dd56e/r):\n",
|
||||
"\n",
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
"source": [
|
||||
"# Chatbots\n",
|
||||
"\n",
|
||||
"[](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/extras/use_cases/chatbots/chatbots.ipynb)\n",
|
||||
"[](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/extras/use_cases/chatbots.ipynb)\n",
|
||||
"\n",
|
||||
"## Use case\n",
|
||||
"\n",
|
||||
|
||||
@@ -130,7 +130,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Splittng\n",
|
||||
"### Splitting\n",
|
||||
"\n",
|
||||
"Split the `Document` into chunks for embedding and vector storage.\n",
|
||||
"\n",
|
||||
|
||||
@@ -17,7 +17,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import PALChain\n",
|
||||
"from langchain_experimental.pal_chain import PALChain\n",
|
||||
"from langchain import OpenAI"
|
||||
]
|
||||
},
|
||||
|
||||
695
docs/extras/use_cases/more/graph/graph_memgraph_qa.ipynb
Normal file
@@ -0,0 +1,695 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "311b3061",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Memgraph QA chain\n",
|
||||
"This notebook shows how to use LLMs to provide a natural language interface to a [Memgraph](https://github.com/memgraph/memgraph) database. To complete this tutorial, you will need [Docker](https://www.docker.com/get-started/) and [Python 3.x](https://www.python.org/) installed.\n",
|
||||
"\n",
|
||||
"To follow along with this tutorial, ensure you have a running Memgraph instance. You can download and run it in a local Docker container by executing the following script:\n",
|
||||
"```\n",
|
||||
"docker run \\\n",
|
||||
" -it \\\n",
|
||||
" -p 7687:7687 \\\n",
|
||||
" -p 7444:7444 \\\n",
|
||||
" -p 3000:3000 \\\n",
|
||||
" -e MEMGRAPH=\"--bolt-server-name-for-init=Neo4j/\" \\\n",
|
||||
" -v mg_lib:/var/lib/memgraph memgraph/memgraph-platform\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"You will need to wait a few seconds for the database to start. If the process completes successfully, you should see something like this:\n",
|
||||
"```\n",
|
||||
"mgconsole X.X\n",
|
||||
"Connected to 'memgraph://127.0.0.1:7687'\n",
|
||||
"Type :help for shell usage\n",
|
||||
"Quit the shell by typing Ctrl-D(eof) or :quit\n",
|
||||
"memgraph>\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Now you can start playing with Memgraph!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "45ee105e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Begin by installing and importing all the necessary packages. We'll use the package manager called [pip](https://pip.pypa.io/en/stable/installation/), along with the `--user` flag, to ensure proper permissions. If you've installed Python 3.4 or a later version, pip is included by default. You can install all the required packages using the following command:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "fd6b9672",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pip install langchain openai neo4j gqlalchemy --user"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ec969a02",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can either run the provided code blocks in this notebook or use a separate Python file to experiment with Memgraph and LangChain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8206f90d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.chains import GraphCypherQAChain\n",
|
||||
"from langchain.graphs import MemgraphGraph\n",
|
||||
"from langchain import PromptTemplate\n",
|
||||
"\n",
|
||||
"from gqlalchemy import Memgraph\n",
|
||||
"\n",
|
||||
"import os"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "95ba37a4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We're utilizing the Python library [GQLAlchemy](https://github.com/memgraph/gqlalchemy) to establish a connection between our Memgraph database and Python script. To execute queries, we can set up a Memgraph instance as follows:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b90c9cf8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"memgraph = Memgraph(host='127.0.0.1', port=7687)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4c379d16",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Populating the database\n",
|
||||
"You can effortlessly populate your new, empty database using the Cypher query language. Don't worry if you don't grasp every line just yet, you can learn Cypher from the documentation [here](https://memgraph.com/docs/cypher-manual/). Running the following script will execute a seeding query on the database, giving us data about a video game, including details like the publisher, available platforms, and genres. This data will serve as a basis for our work."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "11922bdf",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Creating and executing the seeding query\n",
|
||||
"query = \"\"\"\n",
|
||||
" MERGE (g:Game {name: \"Baldur's Gate 3\"})\n",
|
||||
" WITH g, [\"PlayStation 5\", \"Mac OS\", \"Windows\", \"Xbox Series X/S\"] AS platforms,\n",
|
||||
" [\"Adventure\", \"Role-Playing Game\", \"Strategy\"] AS genres\n",
|
||||
" FOREACH (platform IN platforms |\n",
|
||||
" MERGE (p:Platform {name: platform})\n",
|
||||
" MERGE (g)-[:AVAILABLE_ON]->(p)\n",
|
||||
" )\n",
|
||||
" FOREACH (genre IN genres |\n",
|
||||
" MERGE (gn:Genre {name: genre})\n",
|
||||
" MERGE (g)-[:HAS_GENRE]->(gn)\n",
|
||||
" )\n",
|
||||
" MERGE (p:Publisher {name: \"Larian Studios\"})\n",
|
||||
" MERGE (g)-[:PUBLISHED_BY]->(p);\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"memgraph.execute(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "378db965",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Refresh graph schema"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d6b37df3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You're all set to instantiate the Memgraph-LangChain graph using the following script. This interface will allow us to query our database using LangChain, automatically creating the required graph schema for generating Cypher queries through LLM."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f38bbe83",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"graph = MemgraphGraph(url=\"bolt://localhost:7687\", username=\"\", password=\"\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "846c32a8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If necessary, you can manually refresh the graph schema as follows."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b561026e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"graph.refresh_schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c51b7948",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To familiarize yourself with the data and verify the updated graph schema, you can print it using the following statement."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f2e0ec3e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(graph.get_schema)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a0c2a556",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```\n",
|
||||
"Node properties are the following:\n",
|
||||
"Node name: 'Game', Node properties: [{'property': 'name', 'type': 'str'}]\n",
|
||||
"Node name: 'Platform', Node properties: [{'property': 'name', 'type': 'str'}]\n",
|
||||
"Node name: 'Genre', Node properties: [{'property': 'name', 'type': 'str'}]\n",
|
||||
"Node name: 'Publisher', Node properties: [{'property': 'name', 'type': 'str'}]\n",
|
||||
"\n",
|
||||
"Relationship properties are the following:\n",
|
||||
"\n",
|
||||
"The relationships are the following:\n",
|
||||
"['(:Game)-[:AVAILABLE_ON]->(:Platform)']\n",
|
||||
"['(:Game)-[:HAS_GENRE]->(:Genre)']\n",
|
||||
"['(:Game)-[:PUBLISHED_BY]->(:Publisher)']\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "44d3a1da",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Querying the database"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8aedfd63",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To interact with the OpenAI API, you must configure your API key as an environment variable using the Python [os](https://docs.python.org/3/library/os.html) package. This ensures proper authorization for your requests. You can find more information on obtaining your API key [here](https://help.openai.com/en/articles/4936850-where-do-i-find-my-secret-api-key)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b8385c72",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"os.environ[\"OPENAI_API_KEY\"] = \"your-key-here\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5a74565a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You should create the graph chain using the following script, which will be utilized in the question-answering process based on your graph data. While it defaults to GPT-3.5-turbo, you might also consider experimenting with other models like [GPT-4](https://help.openai.com/en/articles/7102672-how-can-i-access-gpt-4) for notably improved Cypher queries and outcomes. We'll utilize the OpenAI chat, utilizing the key you previously configured. We'll set the temperature to zero, ensuring predictable and consistent answers. Additionally, we'll use our Memgraph-LangChain graph and set the verbose parameter, which defaults to False, to True to receive more detailed messages regarding query generation."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4a3a5f2e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = GraphCypherQAChain.from_llm(\n",
|
||||
" ChatOpenAI(temperature=0), graph=graph, verbose=True, model_name='gpt-3.5-turbo'\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "949de4f3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now you can start asking questions!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b7aea263",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"response = chain.run(\"Which platforms is Baldur's Gate 3 available on?\")\n",
|
||||
"print(response)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a06a8164",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```\n",
|
||||
"> Entering new GraphCypherQAChain chain...\n",
|
||||
"Generated Cypher:\n",
|
||||
"MATCH (g:Game {name: 'Baldur\\'s Gate 3'})-[:AVAILABLE_ON]->(p:Platform)\n",
|
||||
"RETURN p.name\n",
|
||||
"Full Context:\n",
|
||||
"[{'p.name': 'PlayStation 5'}, {'p.name': 'Mac OS'}, {'p.name': 'Windows'}, {'p.name': 'Xbox Series X/S'}]\n",
|
||||
"\n",
|
||||
"> Finished chain.\n",
|
||||
"Baldur's Gate 3 is available on PlayStation 5, Mac OS, Windows, and Xbox Series X/S.\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "59d298d5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"response = chain.run(\"Is Baldur's Gate 3 available on Windows?\")\n",
|
||||
"print(response)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "99dd783c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```\n",
|
||||
"> Entering new GraphCypherQAChain chain...\n",
|
||||
"Generated Cypher:\n",
|
||||
"MATCH (:Game {name: 'Baldur\\'s Gate 3'})-[:AVAILABLE_ON]->(:Platform {name: 'Windows'})\n",
|
||||
"RETURN true\n",
|
||||
"Full Context:\n",
|
||||
"[{'true': True}]\n",
|
||||
"\n",
|
||||
"> Finished chain.\n",
|
||||
"Yes, Baldur's Gate 3 is available on Windows.\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "08620465",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chain modifiers"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6603e6c8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To modify the behavior of your chain and obtain more context or additional information, you can modify the chain's parameters."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8d187a83",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Return direct query results\n",
|
||||
"The `return_direct` modifier specifies whether to return the direct results of the executed Cypher query or the processed natural language response."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "0533847d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Return the result of querying the graph directly\n",
|
||||
"chain = GraphCypherQAChain.from_llm(\n",
|
||||
" ChatOpenAI(temperature=0), graph=graph, verbose=True, return_direct=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "afbe96fb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"response = chain.run(\"Which studio published Baldur's Gate 3?\")\n",
|
||||
"print(response)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "94b32b6e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```\n",
|
||||
"> Entering new GraphCypherQAChain chain...\n",
|
||||
"Generated Cypher:\n",
|
||||
"MATCH (:Game {name: 'Baldur\\'s Gate 3'})-[:PUBLISHED_BY]->(p:Publisher)\n",
|
||||
"RETURN p.name\n",
|
||||
"\n",
|
||||
"> Finished chain.\n",
|
||||
"[{'p.name': 'Larian Studios'}]\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5c97ab3a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Return query intermediate steps\n",
|
||||
"The `return_intermediate_steps` chain modifier enhances the returned response by including the intermediate steps of the query in addition to the initial query result."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "82f673c8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Return all the intermediate steps of query execution\n",
|
||||
"chain = GraphCypherQAChain.from_llm(\n",
|
||||
" ChatOpenAI(temperature=0), graph=graph, verbose=True, return_intermediate_steps=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d87e0976",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"response = chain(\"Is Baldur's Gate 3 an Adventure game?\")\n",
|
||||
"print(f\"Intermediate steps: {response['intermediate_steps']}\")\n",
|
||||
"print(f\"Final response: {response['result']}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "df12b3da",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```\n",
|
||||
"> Entering new GraphCypherQAChain chain...\n",
|
||||
"Generated Cypher:\n",
|
||||
"MATCH (g:Game {name: 'Baldur\\'s Gate 3'})-[:HAS_GENRE]->(genre:Genre {name: 'Adventure'})\n",
|
||||
"RETURN g, genre\n",
|
||||
"Full Context:\n",
|
||||
"[{'g': {'name': \"Baldur's Gate 3\"}, 'genre': {'name': 'Adventure'}}]\n",
|
||||
"\n",
|
||||
"> Finished chain.\n",
|
||||
"Intermediate steps: [{'query': \"MATCH (g:Game {name: 'Baldur\\\\'s Gate 3'})-[:HAS_GENRE]->(genre:Genre {name: 'Adventure'})\\nRETURN g, genre\"}, {'context': [{'g': {'name': \"Baldur's Gate 3\"}, 'genre': {'name': 'Adventure'}}]}]\n",
|
||||
"Final response: Yes, Baldur's Gate 3 is an Adventure game.\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "41124485",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Limit the number of query results\n",
|
||||
"The `top_k` modifier can be used when you want to restrict the maximum number of query results."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7340fc87",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Limit the maximum number of results returned by query\n",
|
||||
"chain = GraphCypherQAChain.from_llm(\n",
|
||||
" ChatOpenAI(temperature=0), graph=graph, verbose=True, top_k=2\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3a17cdc6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"response = chain.run(\"What genres are associated with Baldur's Gate 3?\")\n",
|
||||
"print(response)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "dcff33ed",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```\n",
|
||||
"> Entering new GraphCypherQAChain chain...\n",
|
||||
"Generated Cypher:\n",
|
||||
"MATCH (:Game {name: 'Baldur\\'s Gate 3'})-[:HAS_GENRE]->(g:Genre)\n",
|
||||
"RETURN g.name\n",
|
||||
"Full Context:\n",
|
||||
"[{'g.name': 'Adventure'}, {'g.name': 'Role-Playing Game'}]\n",
|
||||
"\n",
|
||||
"> Finished chain.\n",
|
||||
"Baldur's Gate 3 is associated with the genres Adventure and Role-Playing Game.\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2eb524a1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Advanced querying"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "113be997",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"As the complexity of your solution grows, you might encounter different use-cases that require careful handling. Ensuring your application's scalability is essential to maintain a smooth user flow without any hitches."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e0b2db17",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's instantiate our chain once again and attempt to ask some questions that users might potentially ask."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "fc544d0b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = GraphCypherQAChain.from_llm(\n",
|
||||
" ChatOpenAI(temperature=0), graph=graph, verbose=True, model_name='gpt-3.5-turbo'\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e2abde2d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"response = chain.run(\"Is Baldur's Gate 3 available on PS5?\")\n",
|
||||
"print(response)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "cf22dc48",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```\n",
|
||||
"> Entering new GraphCypherQAChain chain...\n",
|
||||
"Generated Cypher:\n",
|
||||
"MATCH (g:Game {name: 'Baldur\\'s Gate 3'})-[:AVAILABLE_ON]->(p:Platform {name: 'PS5'})\n",
|
||||
"RETURN g.name, p.name\n",
|
||||
"Full Context:\n",
|
||||
"[]\n",
|
||||
"\n",
|
||||
"> Finished chain.\n",
|
||||
"I'm sorry, but I don't have the information to answer your question.\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "293aa1c9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The generated Cypher query looks fine, but we didn't receive any information in response. This illustrates a common challenge when working with LLMs - the misalignment between how users phrase queries and how data is stored. In this case, the difference between user perception and the actual data storage can cause mismatches. Prompt refinement, the process of honing the model's prompts to better grasp these distinctions, is an efficient solution that tackles this issue. Through prompt refinement, the model gains increased proficiency in generating precise and pertinent queries, leading to the successful retrieval of the desired data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a87b2f1b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Prompt refinement"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8edb9976",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To address this, we can adjust the initial Cypher prompt of the QA chain. This involves adding guidance to the LLM on how users can refer to specific platforms, such as PS5 in our case. We achieve this using the LangChain [PromptTemplate](https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/), creating a modified initial prompt. This modified prompt is then supplied as an argument to our refined Memgraph-LangChain instance."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "312dad05",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"CYPHER_GENERATION_TEMPLATE = \"\"\"\n",
|
||||
"Task:Generate Cypher statement to query a graph database.\n",
|
||||
"Instructions:\n",
|
||||
"Use only the provided relationship types and properties in the schema.\n",
|
||||
"Do not use any other relationship types or properties that are not provided.\n",
|
||||
"Schema:\n",
|
||||
"{schema}\n",
|
||||
"Note: Do not include any explanations or apologies in your responses.\n",
|
||||
"Do not respond to any questions that might ask anything else than for you to construct a Cypher statement.\n",
|
||||
"Do not include any text except the generated Cypher statement.\n",
|
||||
"If the user asks about PS5, Play Station 5 or PS 5, that is the platform called PlayStation 5.\n",
|
||||
"\n",
|
||||
"The question is:\n",
|
||||
"{question}\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"CYPHER_GENERATION_PROMPT = PromptTemplate(\n",
|
||||
" input_variables=[\"schema\", \"question\"], template=CYPHER_GENERATION_TEMPLATE\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2c297245",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = GraphCypherQAChain.from_llm(\n",
|
||||
" ChatOpenAI(temperature=0), \n",
|
||||
" cypher_prompt=CYPHER_GENERATION_PROMPT,\n",
|
||||
" graph=graph, \n",
|
||||
" verbose=True, \n",
|
||||
" model_name='gpt-3.5-turbo'\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7efb11a0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"response = chain.run(\"Is Baldur's Gate 3 available on PS5?\")\n",
|
||||
"print(response)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "289db07f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```\n",
|
||||
"> Entering new GraphCypherQAChain chain...\n",
|
||||
"Generated Cypher:\n",
|
||||
"MATCH (g:Game {name: 'Baldur\\'s Gate 3'})-[:AVAILABLE_ON]->(p:Platform {name: 'PlayStation 5'})\n",
|
||||
"RETURN g.name, p.name\n",
|
||||
"Full Context:\n",
|
||||
"[{'g.name': \"Baldur's Gate 3\", 'p.name': 'PlayStation 5'}]\n",
|
||||
"\n",
|
||||
"> Finished chain.\n",
|
||||
"Yes, Baldur's Gate 3 is available on PlayStation 5.\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "84b5f6af",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now, with the revised initial Cypher prompt that includes guidance on platform naming, we are obtaining accurate and relevant results that align more closely with user queries. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a21108ad",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This approach allows for further improvement of your QA chain. You can effortlessly integrate extra prompt refinement data into your chain, thereby enhancing the overall user experience of your app."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -40,7 +40,7 @@ pip install 'langchain[all]'
|
||||
|
||||
## From source
|
||||
|
||||
If you want to install from source, you can do so by cloning the repo and running:
|
||||
If you want to install from source, you can do so by cloning the repo, making sure your working directory is `PATH/TO/REPO/langchain/libs/langchain`, and running:
|
||||
|
||||
```bash
|
||||
pip install -e .
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
## Using PyPDF
|
||||
# Using PyPDF
|
||||
|
||||
Load PDF using `pypdf` into array of documents, where each document contains the page content and metadata with `page` number.
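
A brief sketch of the loading pattern (the file path below is illustrative):

```python
from langchain.document_loaders import PyPDFLoader

loader = PyPDFLoader("example_data/layout-parser-paper.pdf")
pages = loader.load_and_split()
print(pages[0].metadata)  # includes the source path and the page number
```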
|
||||
|
||||
@@ -389,3 +389,17 @@ data[0]
|
||||
```
|
||||
|
||||
</CodeOutputBlock>
|
||||
|
||||
## Using AmazonTextractPDFParser
|
||||
|
||||
The AmazonTextractPDFLoader calls the [Amazon Textract Service](https://aws.amazon.com/textract/) to convert PDFs into a Document structure. The loader does pure OCR at the moment, with more features like layout support planned, depending on demand. Single and multi-page documents are supported, up to 3000 pages and 512 MB in size.
|
||||
|
||||
For the call to be successful an AWS account is required, similar to the [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html) requirements.
|
||||
|
||||
Besides the AWS configuration, it is very similar to the other PDF loaders, and it also supports JPEG, PNG, TIFF, and non-native PDF formats.
|
||||
|
||||
```python
|
||||
from langchain.document_loaders import AmazonTextractPDFLoader
|
||||
loader = AmazonTextractPDFLoader("example_data/alejandro_rosalez_sample-small.jpeg")
|
||||
documents = loader.load()
|
||||
```
|
||||
@@ -43,7 +43,7 @@ llm("Tell me a joke")
|
||||
</CodeOutputBlock>
|
||||
|
||||
### `generate`: batch calls, richer outputs
|
||||
`generate` lets you can call the model with a list of strings, getting back a more complete response than just the text. This complete response can include things like multiple top responses and other LLM provider-specific information:
|
||||
`generate` lets you call the model with a list of strings, getting back a more complete response than just the text. This complete response can include things like multiple top responses and other LLM provider-specific information:
|
||||
|
||||
```python
|
||||
llm_result = llm.generate(["Tell me a joke", "Tell me a poem"]*15)
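
# `generate` returns an LLMResult. A sketch of inspecting it (field names per
# the standard LLMResult schema; assumed here rather than shown in this doc):
len(llm_result.generations)        # 30 -- one list of Generation objects per prompt
llm_result.generations[0][0].text  # the first completion for the first prompt
llm_result.llm_output              # provider-specific info, e.g. token usage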
|
||||
|
||||
@@ -5,34 +5,24 @@ from langchain.llms import OpenAI
|
||||
from langchain.chat_models import ChatOpenAI
|
||||
|
||||
output_parser = CommaSeparatedListOutputParser()
|
||||
```
|
||||
|
||||
|
||||
```python
|
||||
format_instructions = output_parser.get_format_instructions()
|
||||
prompt = PromptTemplate(
|
||||
template="List five {subject}.\n{format_instructions}",
|
||||
input_variables=["subject"],
|
||||
partial_variables={"format_instructions": format_instructions}
|
||||
)
|
||||
```
|
||||
|
||||
|
||||
```python
|
||||
model = OpenAI(temperature=0)
|
||||
```
|
||||
|
||||
|
||||
```python
|
||||
_input = prompt.format(subject="ice cream flavors")
|
||||
output = model(_input)
|
||||
```
|
||||
|
||||
|
||||
```python
|
||||
output_parser.parse(output)
|
||||
```
|
||||
|
||||
The resulting output will be:
|
||||
|
||||
<CodeOutputBlock lang="python">
|
||||
|
||||
```
|
||||
|
||||
@@ -13,7 +13,6 @@ from langchain.schema.messages import AIMessage, HumanMessage, SystemMessage
|
||||
from langchain.tools.base import BaseTool
|
||||
from langchain.tools.human.tool import HumanInputRun
|
||||
from langchain.vectorstores.base import VectorStoreRetriever
|
||||
from pydantic import ValidationError
|
||||
|
||||
from langchain_experimental.autonomous_agents.autogpt.output_parser import (
|
||||
AutoGPTOutputParser,
|
||||
@@ -23,6 +22,7 @@ from langchain_experimental.autonomous_agents.autogpt.prompt import AutoGPTPromp
|
||||
from langchain_experimental.autonomous_agents.autogpt.prompt_generator import (
|
||||
FINISH_NAME,
|
||||
)
|
||||
from langchain_experimental.pydantic_v1 import ValidationError
|
||||
|
||||
|
||||
class AutoGPT:
|
||||
|
||||
@@ -2,7 +2,8 @@ from typing import Any, Dict, List
|
||||
|
||||
from langchain.memory.chat_memory import BaseChatMemory, get_prompt_input_key
|
||||
from langchain.vectorstores.base import VectorStoreRetriever
|
||||
from pydantic import Field
|
||||
|
||||
from langchain_experimental.pydantic_v1 import Field
|
||||
|
||||
|
||||
class AutoGPTMemory(BaseChatMemory):
|
||||
|
||||
@@ -7,9 +7,9 @@ from langchain.prompts.chat import (
|
||||
from langchain.schema.messages import BaseMessage, HumanMessage, SystemMessage
|
||||
from langchain.tools.base import BaseTool
|
||||
from langchain.vectorstores.base import VectorStoreRetriever
|
||||
from pydantic import BaseModel
|
||||
|
||||
from langchain_experimental.autonomous_agents.autogpt.prompt_generator import get_prompt
|
||||
from langchain_experimental.pydantic_v1 import BaseModel
|
||||
|
||||
|
||||
class AutoGPTPrompt(BaseChatPromptTemplate, BaseModel):
|
||||
|
||||
@@ -6,7 +6,6 @@ from langchain.callbacks.manager import CallbackManagerForChainRun
|
||||
from langchain.chains.base import Chain
|
||||
from langchain.schema.language_model import BaseLanguageModel
|
||||
from langchain.vectorstores.base import VectorStore
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from langchain_experimental.autonomous_agents.baby_agi.task_creation import (
|
||||
TaskCreationChain,
|
||||
@@ -17,6 +16,7 @@ from langchain_experimental.autonomous_agents.baby_agi.task_execution import (
|
||||
from langchain_experimental.autonomous_agents.baby_agi.task_prioritization import (
|
||||
TaskPrioritizationChain,
|
||||
)
|
||||
from langchain_experimental.pydantic_v1 import BaseModel, Field
|
||||
|
||||
|
||||
class BabyAGI(Chain, BaseModel):
|
||||
|
||||
@@ -13,7 +13,8 @@ from langchain.prompts.chat import (
|
||||
SystemMessagePromptTemplate,
|
||||
)
|
||||
from langchain.tools.base import BaseTool
|
||||
from pydantic import BaseModel
|
||||
|
||||
from langchain_experimental.pydantic_v1 import BaseModel
|
||||
|
||||
DEMONSTRATIONS = [
|
||||
{
|
||||
|
||||
@@ -6,9 +6,15 @@ from typing import Any, Optional, Union
|
||||
import duckdb
|
||||
import pandas as pd
|
||||
from langchain.graphs.networkx_graph import NetworkxEntityGraph
|
||||
from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator
|
||||
|
||||
from langchain_experimental.cpal.constants import Constant
|
||||
from langchain_experimental.pydantic_v1 import (
|
||||
BaseModel,
|
||||
Field,
|
||||
PrivateAttr,
|
||||
root_validator,
|
||||
validator,
|
||||
)
|
||||
|
||||
|
||||
class NarrativeModel(BaseModel):
|
||||
|
||||
@@ -5,9 +5,9 @@ from typing import Any, Dict, List, Optional, Tuple
|
||||
from langchain.chains import LLMChain
|
||||
from langchain.prompts import PromptTemplate
|
||||
from langchain.schema.language_model import BaseLanguageModel
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from langchain_experimental.generative_agents.memory import GenerativeAgentMemory
|
||||
from langchain_experimental.pydantic_v1 import BaseModel, Field
|
||||
|
||||
|
||||
class GenerativeAgent(BaseModel):
|
||||
|
||||
@@ -19,7 +19,8 @@ from langchain.schema.messages import (
|
||||
BaseMessage,
|
||||
SystemMessage,
|
||||
)
|
||||
from pydantic import root_validator
|
||||
|
||||
from langchain_experimental.pydantic_v1 import root_validator
|
||||
|
||||
prompt = """In addition to responding, you can use tools. \
|
||||
You have access to the following tools.
|
||||
|
||||
@@ -6,7 +6,8 @@ from typing import TYPE_CHECKING, Any, List, Optional, cast
|
||||
|
||||
from langchain.callbacks.manager import CallbackManagerForLLMRun
|
||||
from langchain.llms.huggingface_pipeline import HuggingFacePipeline
|
||||
from pydantic import Field, root_validator
|
||||
|
||||
from langchain_experimental.pydantic_v1 import Field, root_validator
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import jsonformer
|
||||
|
||||
@@ -6,7 +6,8 @@ from typing import TYPE_CHECKING, Any, List, Optional, cast
|
||||
from langchain.callbacks.manager import CallbackManagerForLLMRun
|
||||
from langchain.llms.huggingface_pipeline import HuggingFacePipeline
|
||||
from langchain.llms.utils import enforce_stop_tokens
|
||||
from pydantic import Field, root_validator
|
||||
|
||||
from langchain_experimental.pydantic_v1 import Field, root_validator
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import rellm
|
||||
|
||||
@@ -15,10 +15,10 @@ from langchain.chains.base import Chain
|
||||
from langchain.chains.llm import LLMChain
|
||||
from langchain.schema.language_model import BaseLanguageModel
|
||||
from langchain.utilities import PythonREPL
|
||||
from pydantic import Extra, Field
|
||||
|
||||
from langchain_experimental.pal_chain.colored_object_prompt import COLORED_OBJECT_PROMPT
|
||||
from langchain_experimental.pal_chain.math_prompt import MATH_PROMPT
|
||||
from langchain_experimental.pydantic_v1 import Extra, Field
|
||||
|
||||
COMMAND_EXECUTION_FUNCTIONS = ["system", "exec", "execfile", "eval"]
|
||||
|
||||
|
||||
@@ -5,7 +5,6 @@ from langchain.callbacks.manager import (
|
||||
CallbackManagerForChainRun,
|
||||
)
|
||||
from langchain.chains.base import Chain
|
||||
from pydantic import Field
|
||||
|
||||
from langchain_experimental.plan_and_execute.executors.base import BaseExecutor
|
||||
from langchain_experimental.plan_and_execute.planners.base import BasePlanner
|
||||
@@ -13,6 +12,7 @@ from langchain_experimental.plan_and_execute.schema import (
|
||||
BaseStepContainer,
|
||||
ListStepContainer,
|
||||
)
|
||||
from langchain_experimental.pydantic_v1 import Field
|
||||
|
||||
|
||||
class PlanAndExecute(Chain):
|
||||
|
||||
@@ -3,9 +3,9 @@ from typing import Any
|
||||
|
||||
from langchain.callbacks.manager import Callbacks
|
||||
from langchain.chains.base import Chain
|
||||
from pydantic import BaseModel
|
||||
|
||||
from langchain_experimental.plan_and_execute.schema import StepResponse
|
||||
from langchain_experimental.pydantic_v1 import BaseModel
|
||||
|
||||
|
||||
class BaseExecutor(BaseModel):
|
||||
|
||||
@@ -3,9 +3,9 @@ from typing import Any, List, Optional
|
||||
|
||||
from langchain.callbacks.manager import Callbacks
|
||||
from langchain.chains.llm import LLMChain
|
||||
from pydantic import BaseModel
|
||||
|
||||
from langchain_experimental.plan_and_execute.schema import Plan, PlanOutputParser
|
||||
from langchain_experimental.pydantic_v1 import BaseModel
|
||||
|
||||
|
||||
class BasePlanner(BaseModel):
|
||||
|
||||
@@ -2,7 +2,8 @@ from abc import abstractmethod
|
||||
from typing import List, Tuple
|
||||
|
||||
from langchain.schema import BaseOutputParser
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from langchain_experimental.pydantic_v1 import BaseModel, Field
|
||||
|
||||
|
||||
class Step(BaseModel):
|
||||
|
||||
0
libs/experimental/langchain_experimental/py.typed
Normal file
A new 23-line module, evidently the `langchain_experimental.pydantic_v1` package that the hunks above import from:

```diff
@@ -0,0 +1,23 @@
+from importlib import metadata
+
+## Create namespaces for pydantic v1 and v2.
+# This code must stay at the top of the file before other modules may
+# attempt to import pydantic since it adds pydantic_v1 and pydantic_v2 to sys.modules.
+#
+# This hack is done for the following reasons:
+# * Langchain will attempt to remain compatible with both pydantic v1 and v2 since
+#   both dependencies and dependents may be stuck on either version of v1 or v2.
+# * Creating namespaces for pydantic v1 and v2 should allow us to write code that
+#   unambiguously uses either v1 or v2 API.
+# * This change is easier to roll out and roll back.
+
+try:
+    from pydantic.v1 import *  # noqa: F403
+except ImportError:
+    from pydantic import *  # noqa: F403
+
+
+try:
+    _PYDANTIC_MAJOR_VERSION: int = int(metadata.version("pydantic").split(".")[0])
+except metadata.PackageNotFoundError:
+    _PYDANTIC_MAJOR_VERSION = 0
```
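The whole trick is in the `try`/`except`: under pydantic 2.x the legacy API lives on as the `pydantic.v1` subpackage, so the star-import re-exports it; under 1.x that import fails and the top-level package is re-exported instead. `_PYDANTIC_MAJOR_VERSION` is probed from the installed distribution's metadata rather than from `pydantic.VERSION`, with 0 as the sentinel when no metadata is found. A quick sanity check, assuming only that langchain-experimental and any pydantic 1.x or 2.x are installed:

```python
import importlib

import pydantic

from langchain_experimental import pydantic_v1

print(pydantic.VERSION)                     # e.g. "2.1.1" or "1.10.12"
print(pydantic_v1._PYDANTIC_MAJOR_VERSION)  # 2 or 1 (0 if metadata is missing)

# Whichever major version is installed, the shim hands back the v1 flavour.
if pydantic_v1._PYDANTIC_MAJOR_VERSION >= 2:
    v1_api = importlib.import_module("pydantic.v1")
else:
    v1_api = pydantic
assert pydantic_v1.BaseModel is v1_api.BaseModel
```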
```diff
@@ -0,0 +1,4 @@
+try:
+    from pydantic.v1.dataclasses import *  # noqa: F403
+except ImportError:
+    from pydantic.dataclasses import *  # noqa: F403
```

```diff
@@ -0,0 +1,4 @@
+try:
+    from pydantic.v1.main import *  # noqa: F403
+except ImportError:
+    from pydantic.main import *  # noqa: F403
```
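These two four-line files repeat the pattern for the `pydantic.dataclasses` and `pydantic.main` submodules, which some call sites import from directly and which also moved under `pydantic.v1.*` in the 2.x release. A hedged sketch of the dataclasses shim in use; `Point` is illustrative only, not langchain code:

```python
from langchain_experimental.pydantic_v1.dataclasses import dataclass


@dataclass
class Point:
    x: int
    y: int


# Validation and coercion behave like a pydantic v1 dataclass on either
# installed major version, e.g. the string "2" is coerced to int 2 here.
print(Point(x=1, y="2"))
```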
```diff
@@ -13,7 +13,8 @@ from langchain.prompts.chat import (
     HumanMessagePromptTemplate,
 )
 from langchain.schema import LLMResult, PromptValue
-from pydantic import Extra, root_validator
+
+from langchain_experimental.pydantic_v1 import Extra, root_validator
 
 
 class SmartLLMChain(Chain):
```

```diff
@@ -13,7 +13,8 @@ from langchain.schema import BasePromptTemplate
 from langchain.schema.language_model import BaseLanguageModel
 from langchain.tools.sql_database.prompt import QUERY_CHECKER
 from langchain.utilities.sql_database import SQLDatabase
-from pydantic import Extra, Field, root_validator
+
+from langchain_experimental.pydantic_v1 import Extra, Field, root_validator
 
 INTERMEDIATE_STEPS_KEY = "intermediate_steps"
```

```diff
@@ -20,8 +20,8 @@ from langchain.callbacks.manager import (
     CallbackManagerForChainRun,
 )
 from langchain.chains.base import Chain
-from pydantic import Extra
 
+from langchain_experimental.pydantic_v1 import Extra
 from langchain_experimental.tot.checker import ToTChecker
 from langchain_experimental.tot.controller import ToTController
 from langchain_experimental.tot.memory import ToTDFSMemory
```

```diff
@@ -3,7 +3,7 @@ from __future__ import annotations
 from enum import Enum
 from typing import Set
 
-from pydantic import BaseModel, Field
+from langchain_experimental.pydantic_v1 import BaseModel, Field
 
 
 class ThoughtValidity(Enum):
```

```diff
@@ -11,8 +11,8 @@ from typing import Any, Dict, List, Tuple
 
 from langchain.chains.llm import LLMChain
 from langchain.prompts.base import BasePromptTemplate
-from pydantic import Field
 
+from langchain_experimental.pydantic_v1 import Field
 from langchain_experimental.tot.prompts import COT_PROMPT, PROPOSE_PROMPT
```
```diff
@@ -1,11 +1,11 @@
 [tool.poetry]
 name = "langchain-experimental"
-version = "0.0.9"
+version = "0.0.10"
 description = "Building applications with LLMs through composability"
 authors = []
 license = "MIT"
 readme = "README.md"
-repository = "https://www.github.com/hwchase17/langchain"
+repository = "https://github.com/langchain-ai/langchain"
 
 
 [tool.poetry.dependencies]
```
```diff
@@ -3,7 +3,8 @@ from typing import Any, Dict, List, Mapping, Optional, cast
 
 from langchain.callbacks.manager import CallbackManagerForLLMRun
 from langchain.llms.base import LLM
-from pydantic import validator
+
+from langchain_experimental.pydantic_v1 import validator
 
 
 class FakeLLM(LLM):
```
The `lint` recipe is reordered so the cheap ruff pass runs first, presumably letting style failures surface before the slower mypy run:

```diff
@@ -76,9 +76,9 @@ lint format: PYTHON_FILES=.
 lint_diff format_diff: PYTHON_FILES=$(shell git diff --relative=libs/langchain --name-only --diff-filter=d master | grep -E '\.py$$|\.ipynb$$')
 
 lint lint_diff:
-	poetry run mypy $(PYTHON_FILES)
-	poetry run black $(PYTHON_FILES) --check
 	poetry run ruff .
+	poetry run black $(PYTHON_FILES) --check
+	poetry run mypy $(PYTHON_FILES)
 
 format format_diff:
 	poetry run black $(PYTHON_FILES)
```
```diff
@@ -1,38 +1,8 @@
 # ruff: noqa: E402
 """Main entrypoint into package."""
-import importlib
-import sys
 from importlib import metadata
 from typing import Optional
 
-## Create namespaces for pydantic v1 and v2.
-# This code must stay at the top of the file before other modules may
-# attempt to import pydantic since it adds pydantic_v1 and pydantic_v2 to sys.modules.
-#
-# This hack is done for the following reasons:
-# * Langchain will attempt to remain compatible with both pydantic v1 and v2 since
-#   both dependencies and dependents may be stuck on either version of v1 or v2.
-# * Creating namespaces for pydantic v1 and v2 should allow us to write code that
-#   unambiguously uses either v1 or v2 API.
-# * This change is easier to roll out and roll back.
-
-try:
-    pydantic_v1 = importlib.import_module("pydantic.v1")
-except ImportError:
-    pydantic_v1 = importlib.import_module("pydantic")
-
-if "pydantic_v1" not in sys.modules:
-    # Use a conditional because langchain experimental
-    # will use the same strategy to add pydantic_v1 to sys.modules
-    # and may run prior to langchain core package.
-    sys.modules["pydantic_v1"] = pydantic_v1
-
-try:
-    _PYDANTIC_MAJOR_VERSION: int = int(metadata.version("pydantic").split(".")[0])
-except metadata.PackageNotFoundError:
-    _PYDANTIC_MAJOR_VERSION = 0
-
-
 from langchain.agents import MRKLChain, ReActChain, SelfAskWithSearchChain
 from langchain.cache import BaseCache
 from langchain.chains import (
```
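Core langchain drops the `sys.modules["pydantic_v1"]` mutation entirely. Registering a bare top-level module name was fragile: it depended on import order between langchain and langchain-experimental, and it could collide with a genuine `pydantic_v1` distribution. The follow-up hunks instead import from a proper `langchain.pydantic_v1` module. That module's body is not visible in this truncated diff, but it presumably mirrors the experimental shim above:

```python
# Presumed contents of langchain/pydantic_v1/__init__.py (not shown in the
# visible part of this diff); expected to match the experimental shim.
try:
    from pydantic.v1 import *  # noqa: F403  # pydantic 2.x installed
except ImportError:
    from pydantic import *  # noqa: F403  # pydantic 1.x installed
```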
```diff
@@ -10,7 +10,6 @@ from pathlib import Path
 from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
 
 import yaml
-from pydantic_v1 import BaseModel, root_validator
 
 from langchain.agents.agent_iterator import AgentExecutorIterator
 from langchain.agents.agent_types import AgentType
@@ -27,6 +26,7 @@ from langchain.chains.base import Chain
 from langchain.chains.llm import LLMChain
 from langchain.prompts.few_shot import FewShotPromptTemplate
 from langchain.prompts.prompt import PromptTemplate
+from langchain.pydantic_v1 import BaseModel, root_validator
 from langchain.schema import (
     AgentAction,
     AgentFinish,
@@ -615,9 +615,9 @@ class Agent(BaseSingleActionAgent):
 class ExceptionTool(BaseTool):
     """Tool that just returns the query."""
 
-    name = "_Exception"
+    name: str = "_Exception"
     """Name of the tool."""
-    description = "Exception tool"
+    description: str = "Exception tool"
     """Description of the tool."""
 
     def _run(
```
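The `name: str = ...` changes are a separate but related fix: pydantic 2.x rejects model bodies that assign a value to an attribute without a type annotation, so the previously tolerated `name = "_Exception"` would fail as soon as the class were built against the v2 API. A standalone repro of the failure mode, using a stand-in `Tool` base class rather than langchain's `BaseTool` (requires pydantic 2.x installed):

```python
from pydantic import BaseModel, PydanticUserError


class Tool(BaseModel):
    name: str


try:

    class BadExceptionTool(Tool):
        name = "_Exception"  # unannotated: rejected at class-creation time

except PydanticUserError as err:
    print(err)  # "A non-annotated attribute was detected: ..."


class GoodExceptionTool(Tool):
    name: str = "_Exception"  # annotated default: valid under v1 and v2


print(GoodExceptionTool().name)  # "_Exception"
```

The four `Requests*ToolWithParsing` hunks further down apply the same annotation fix.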
```diff
@@ -2,9 +2,8 @@ from __future__ import annotations
 
 from typing import TYPE_CHECKING, List
 
-from pydantic_v1 import Field
-
 from langchain.agents.agent_toolkits.base import BaseToolkit
+from langchain.pydantic_v1 import Field
 from langchain.tools import BaseTool
 from langchain.tools.amadeus.closest_airport import AmadeusClosestAirport
 from langchain.tools.amadeus.flight_search import AmadeusFlightSearch
```

```diff
@@ -2,8 +2,7 @@
 from abc import ABC, abstractmethod
 from typing import List
 
-from pydantic_v1 import BaseModel
-
+from langchain.pydantic_v1 import BaseModel
 from langchain.tools import BaseTool
```

```diff
@@ -2,9 +2,8 @@ from __future__ import annotations
 
 from typing import List, Optional
 
-from pydantic_v1 import root_validator
-
 from langchain.agents.agent_toolkits.base import BaseToolkit
+from langchain.pydantic_v1 import root_validator
 from langchain.tools import BaseTool
 from langchain.tools.file_management.copy import CopyFileTool
 from langchain.tools.file_management.delete import DeleteFileTool
```

```diff
@@ -2,9 +2,8 @@ from __future__ import annotations
 
 from typing import TYPE_CHECKING, List
 
-from pydantic_v1 import Field
-
 from langchain.agents.agent_toolkits.base import BaseToolkit
+from langchain.pydantic_v1 import Field
 from langchain.tools import BaseTool
 from langchain.tools.gmail.create_draft import GmailCreateDraft
 from langchain.tools.gmail.get_message import GmailGetMessage
```
```diff
@@ -2,10 +2,9 @@ from __future__ import annotations
 
 from typing import Any, List, Optional, Sequence
 
-from pydantic_v1 import Field
-
 from langchain.agents.agent_toolkits.base import BaseToolkit
 from langchain.agents.agent_toolkits.nla.tool import NLATool
+from langchain.pydantic_v1 import Field
 from langchain.schema.language_model import BaseLanguageModel
 from langchain.tools.base import BaseTool
 from langchain.tools.openapi.utils.openapi_utils import OpenAPISpec
```

```diff
@@ -2,9 +2,8 @@ from __future__ import annotations
 
 from typing import TYPE_CHECKING, List
 
-from pydantic_v1 import Field
-
 from langchain.agents.agent_toolkits.base import BaseToolkit
+from langchain.pydantic_v1 import Field
 from langchain.tools import BaseTool
 from langchain.tools.office365.create_draft_message import O365CreateDraftMessage
 from langchain.tools.office365.events_search import O365SearchEvents
```

```diff
@@ -5,7 +5,6 @@ from functools import partial
 from typing import Any, Callable, Dict, List, Optional
 
 import yaml
-from pydantic_v1 import Field
 
 from langchain.agents.agent import AgentExecutor
 from langchain.agents.agent_toolkits.openapi.planner_prompt import (
@@ -33,6 +32,7 @@ from langchain.chains.llm import LLMChain
 from langchain.llms.openai import OpenAI
 from langchain.memory import ReadOnlySharedMemory
 from langchain.prompts import PromptTemplate
+from langchain.pydantic_v1 import Field
 from langchain.schema import BasePromptTemplate
 from langchain.schema.language_model import BaseLanguageModel
 from langchain.tools.base import BaseTool
```
```diff
@@ -66,7 +66,7 @@ def _get_default_llm_chain_factory(
 class RequestsGetToolWithParsing(BaseRequestsTool, BaseTool):
     """Requests GET tool with LLM-instructed extraction of truncated responses."""
 
-    name = "requests_get"
+    name: str = "requests_get"
     """Tool name."""
     description = REQUESTS_GET_TOOL_DESCRIPTION
     """Tool description."""
@@ -96,7 +96,7 @@ class RequestsGetToolWithParsing(BaseRequestsTool, BaseTool):
 class RequestsPostToolWithParsing(BaseRequestsTool, BaseTool):
     """Requests POST tool with LLM-instructed extraction of truncated responses."""
 
-    name = "requests_post"
+    name: str = "requests_post"
     """Tool name."""
     description = REQUESTS_POST_TOOL_DESCRIPTION
     """Tool description."""
@@ -125,7 +125,7 @@ class RequestsPostToolWithParsing(BaseRequestsTool, BaseTool):
 class RequestsPatchToolWithParsing(BaseRequestsTool, BaseTool):
     """Requests PATCH tool with LLM-instructed extraction of truncated responses."""
 
-    name = "requests_patch"
+    name: str = "requests_patch"
     """Tool name."""
     description = REQUESTS_PATCH_TOOL_DESCRIPTION
     """Tool description."""
@@ -154,7 +154,7 @@ class RequestsPatchToolWithParsing(BaseRequestsTool, BaseTool):
 class RequestsDeleteToolWithParsing(BaseRequestsTool, BaseTool):
     """A tool that sends a DELETE request and parses the response."""
 
-    name = "requests_delete"
+    name: str = "requests_delete"
     """The name of the tool."""
     description = REQUESTS_DELETE_TOOL_DESCRIPTION
     """The description of the tool."""
```
```diff
@@ -3,9 +3,8 @@ from __future__ import annotations
 
 from typing import TYPE_CHECKING, List, Optional, Type, cast
 
-from pydantic_v1 import Extra, root_validator
-
 from langchain.agents.agent_toolkits.base import BaseToolkit
+from langchain.pydantic_v1 import Extra, root_validator
 from langchain.tools.base import BaseTool
 from langchain.tools.playwright.base import (
     BaseBrowserTool,
```

```diff
@@ -1,8 +1,6 @@
 """Toolkit for interacting with a Power BI dataset."""
 from typing import List, Optional, Union
 
-from pydantic_v1 import Field
-
 from langchain.agents.agent_toolkits.base import BaseToolkit
 from langchain.callbacks.base import BaseCallbackManager
 from langchain.chains.llm import LLMChain
@@ -13,6 +11,7 @@ from langchain.prompts.chat import (
     HumanMessagePromptTemplate,
     SystemMessagePromptTemplate,
 )
+from langchain.pydantic_v1 import Field
 from langchain.schema.language_model import BaseLanguageModel
 from langchain.tools import BaseTool
 from langchain.tools.powerbi.prompt import (
```

```diff
@@ -1,9 +1,8 @@
 """Toolkit for interacting with Spark SQL."""
 from typing import List
 
-from pydantic_v1 import Field
-
 from langchain.agents.agent_toolkits.base import BaseToolkit
+from langchain.pydantic_v1 import Field
 from langchain.schema.language_model import BaseLanguageModel
 from langchain.tools import BaseTool
 from langchain.tools.spark_sql.tool import (
```

```diff
@@ -1,9 +1,8 @@
 """Toolkit for interacting with an SQL database."""
 from typing import List
 
-from pydantic_v1 import Field
-
 from langchain.agents.agent_toolkits.base import BaseToolkit
+from langchain.pydantic_v1 import Field
 from langchain.schema.language_model import BaseLanguageModel
 from langchain.tools import BaseTool
 from langchain.tools.sql_database.tool import (
```

```diff
@@ -1,10 +1,9 @@
 """Toolkit for interacting with a vector store."""
 from typing import List
 
-from pydantic_v1 import BaseModel, Field
-
 from langchain.agents.agent_toolkits.base import BaseToolkit
 from langchain.llms.openai import OpenAI
+from langchain.pydantic_v1 import BaseModel, Field
 from langchain.schema.language_model import BaseLanguageModel
 from langchain.tools import BaseTool
 from langchain.tools.vectorstore.tool import (
```
```diff
@@ -1,7 +1,5 @@
 from typing import Any, List, Optional, Sequence, Tuple
 
-from pydantic_v1 import Field
-
 from langchain.agents.agent import Agent, AgentOutputParser
 from langchain.agents.chat.output_parser import ChatOutputParser
 from langchain.agents.chat.prompt import (
@@ -18,6 +16,7 @@ from langchain.prompts.chat import (
     HumanMessagePromptTemplate,
     SystemMessagePromptTemplate,
 )
+from langchain.pydantic_v1 import Field
 from langchain.schema import AgentAction, BasePromptTemplate
 from langchain.schema.language_model import BaseLanguageModel
 from langchain.tools.base import BaseTool
```

```diff
@@ -3,8 +3,6 @@ from __future__ import annotations
 
 from typing import Any, List, Optional, Sequence
 
-from pydantic_v1 import Field
-
 from langchain.agents.agent import Agent, AgentOutputParser
 from langchain.agents.agent_types import AgentType
 from langchain.agents.conversational.output_parser import ConvoOutputParser
@@ -13,6 +11,7 @@ from langchain.agents.utils import validate_tools_single_input
 from langchain.callbacks.base import BaseCallbackManager
 from langchain.chains import LLMChain
 from langchain.prompts import PromptTemplate
+from langchain.pydantic_v1 import Field
 from langchain.schema.language_model import BaseLanguageModel
 from langchain.tools.base import BaseTool
```
Some files were not shown because too many files have changed in this diff.