Mirror of https://github.com/hwchase17/langchain.git (synced 2026-02-16 01:59:52 +00:00)

Compare commits: bagatur/go ... v0.0.256 (79 commits)
Commits:
fc2f450f2d, aeaef8f3a3, 472f00ada7, 6e3fa59073, a616e19975, 100d9ce4c7, c9da300e4d, 5a9765b1b5,
454998c1fb, 0adc282d70, bd4865b6fe, 485d716c21, b57fa1a39c, 6b93670410, 2bb1d256f3, 4a7ebb7184,
797c9e92c8, 5f1aab5487, 983678dedc, f76d50d8dc, 15c271e7b3, d7b613a293, 2f309a4ce6, 2111ed3c75,
d9bc46186d, 1bd4890506, b0d0338f21, a22d502248, 9b86235a56, 9fc9018951, ef5bc1fef1, 1d68470bac,
c8f3615aa6, d00a247da7, 21771a6f1c, e5fed7d535, 19dfe166c9, 91a0817e39, f437311eef, 003e1ca9a0,
8374367de2, 82ef1f587d, b0d0399d34, a6ee646ef3, bd61757423, affaaea87b, 8c35fcb571, e45be8b3f6,
0d5a90f30a, 6b007e2829, be638ad77d, 115a77142a, f0b0c72d98, 6aee589eec, 5b7ff215e8, 0f0ccfe7f6,
2759e2d857, 0f68054401, 0ead8ea708, c7ea6e9ff8, 812419d946, 873a80e496, d1b95db874, 6c3573e7f6,
179a39954d, 6f0bccfeb5, e68a1d73d0, 29f51055e8, 5d765408ce, 404d103c41, 47eea32f6a, b786335dd1,
f81e613086, 8ef7e14a85, 53e4148a1b, 4e8f11b36a, 2928a1a3c9, 814faa9de5, 8a8917e0d9
Makefile (7 changed lines)

@@ -43,7 +43,12 @@ spell_fix:

help:
    @echo '----'
    @echo 'coverage - run unit tests and generate coverage report'
    @echo 'clean - run docs_clean and api_docs_clean'
    @echo 'docs_build - build the documentation'
    @echo 'docs_clean - clean the documentation build artifacts'
    @echo 'docs_linkcheck - run linkchecker on the documentation'
    @echo 'api_docs_build - build the API Reference documentation'
    @echo 'api_docs_clean - clean the API Reference documentation build artifacts'
    @echo 'api_docs_linkcheck - run linkchecker on the API Reference documentation'
    @echo 'spell_check - run codespell on the project'
    @echo 'spell_fix - run codespell on the project and fix the errors'
@@ -100,6 +100,9 @@ extensions = [
]
source_suffix = [".rst"]

# some autodoc pydantic options are repeated in the actual template.
# potentially user error, but there may be bugs in the sphinx extension
# with options not being passed through correctly (from either the location in the code)
autodoc_pydantic_model_show_json = False
autodoc_pydantic_field_list_validators = False
autodoc_pydantic_config_members = False

@@ -112,13 +115,6 @@ autodoc_member_order = "groupwise"
autoclass_content = "both"
autodoc_typehints_format = "short"

autodoc_default_options = {
    "members": True,
    "show-inheritance": True,
    "inherited-members": "BaseModel",
    "undoc-members": True,
    "special-members": "__call__",
}
# autodoc_typehints = "description"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["templates"]
@@ -1,49 +1,209 @@
"""Script for auto-generating api_reference.rst"""
import glob
import re
"""Script for auto-generating api_reference.rst."""
import importlib
import inspect
import typing
from pathlib import Path
from typing import TypedDict, Sequence, List, Dict, Literal, Union
from enum import Enum

from pydantic import BaseModel

ROOT_DIR = Path(__file__).parents[2].absolute()
HERE = Path(__file__).parent

PKG_DIR = ROOT_DIR / "libs" / "langchain" / "langchain"
EXP_DIR = ROOT_DIR / "libs" / "experimental" / "langchain_experimental"
WRITE_FILE = Path(__file__).parent / "api_reference.rst"
EXP_WRITE_FILE = Path(__file__).parent / "experimental_api_reference.rst"
WRITE_FILE = HERE / "api_reference.rst"
EXP_WRITE_FILE = HERE / "experimental_api_reference.rst"


def load_members(dir: Path) -> dict:
    members: dict = {}
    for py in glob.glob(str(dir) + "/**/*.py", recursive=True):
        module = py[len(str(dir)) + 1 :].replace(".py", "").replace("/", ".")
        top_level = module.split(".")[0]
        if top_level not in members:
            members[top_level] = {"classes": [], "functions": []}
        with open(py, "r") as f:
            for line in f.readlines():
                cls = re.findall(r"^class ([^_].*)\(", line)
                members[top_level]["classes"].extend([module + "." + c for c in cls])
                func = re.findall(r"^def ([^_].*)\(", line)
                afunc = re.findall(r"^async def ([^_].*)\(", line)
                func_strings = [module + "." + f for f in func + afunc]
                members[top_level]["functions"].extend(func_strings)
    return members
ClassKind = Literal["TypedDict", "Regular", "Pydantic", "enum"]


def construct_doc(pkg: str, members: dict) -> str:
class ClassInfo(TypedDict):
    """Information about a class."""

    name: str
    """The name of the class."""
    qualified_name: str
    """The fully qualified name of the class."""
    kind: ClassKind
    """The kind of the class."""
    is_public: bool
    """Whether the class is public or not."""


class FunctionInfo(TypedDict):
    """Information about a function."""

    name: str
    """The name of the function."""
    qualified_name: str
    """The fully qualified name of the function."""
    is_public: bool
    """Whether the function is public or not."""


class ModuleMembers(TypedDict):
    """A dictionary of module members."""

    classes_: Sequence[ClassInfo]
    functions: Sequence[FunctionInfo]


def _load_module_members(module_path: str, namespace: str) -> ModuleMembers:
    """Load all members of a module.

    Args:
        module_path: Path to the module.
        namespace: the namespace of the module.

    Returns:
        list: A list of loaded module objects.
    """
    classes_: List[ClassInfo] = []
    functions: List[FunctionInfo] = []
    module = importlib.import_module(module_path)
    for name, type_ in inspect.getmembers(module):
        if not hasattr(type_, "__module__"):
            continue
        if type_.__module__ != module_path:
            continue

        if inspect.isclass(type_):
            if type(type_) == typing._TypedDictMeta:  # type: ignore
                kind: ClassKind = "TypedDict"
            elif issubclass(type_, Enum):
                kind = "enum"
            elif issubclass(type_, BaseModel):
                kind = "Pydantic"
            else:
                kind = "Regular"

            classes_.append(
                ClassInfo(
                    name=name,
                    qualified_name=f"{namespace}.{name}",
                    kind=kind,
                    is_public=not name.startswith("_"),
                )
            )
        elif inspect.isfunction(type_):
            functions.append(
                FunctionInfo(
                    name=name,
                    qualified_name=f"{namespace}.{name}",
                    is_public=not name.startswith("_"),
                )
            )
        else:
            continue

    return ModuleMembers(
        classes_=classes_,
        functions=functions,
    )


def _merge_module_members(
    module_members: Sequence[ModuleMembers],
) -> ModuleMembers:
    """Merge module members."""
    classes_: List[ClassInfo] = []
    functions: List[FunctionInfo] = []
    for module in module_members:
        classes_.extend(module["classes_"])
        functions.extend(module["functions"])

    return ModuleMembers(
        classes_=classes_,
        functions=functions,
    )


def _load_package_modules(
    package_directory: Union[str, Path]
) -> Dict[str, ModuleMembers]:
    """Recursively load modules of a package based on the file system.

    Traversal based on the file system makes it easy to determine which
    of the modules/packages are part of the package vs. 3rd party or built-in.

    Parameters:
        package_directory: Path to the package directory.

    Returns:
        list: A list of loaded module objects.
    """
    package_path = (
        Path(package_directory)
        if isinstance(package_directory, str)
        else package_directory
    )
    modules_by_namespace = {}

    package_name = package_path.name

    for file_path in package_path.rglob("*.py"):
        if not file_path.name.startswith("__"):
            relative_module_name = file_path.relative_to(package_path)
            # Get the full namespace of the module
            namespace = str(relative_module_name).replace(".py", "").replace("/", ".")
            # Keep only the top level namespace
            top_namespace = namespace.split(".")[0]

            try:
                module_members = _load_module_members(
                    f"{package_name}.{namespace}", namespace
                )
                # Merge module members if the namespace already exists
                if top_namespace in modules_by_namespace:
                    existing_module_members = modules_by_namespace[top_namespace]
                    _module_members = _merge_module_members(
                        [existing_module_members, module_members]
                    )
                else:
                    _module_members = module_members

                modules_by_namespace[top_namespace] = _module_members

            except ImportError as e:
                print(f"Error: Unable to import module '{namespace}' with error: {e}")

    return modules_by_namespace


def _construct_doc(pkg: str, members_by_namespace: Dict[str, ModuleMembers]) -> str:
    """Construct the contents of the reference.rst file for the given package.

    Args:
        pkg: The package name
        members_by_namespace: The members of the package, dict organized by top level
            module contains a list of classes and functions
            inside of the top level namespace.

    Returns:
        The contents of the reference.rst file.
    """
    full_doc = f"""\
=============
=======================
``{pkg}`` API Reference
=============
=======================

"""
    for module, _members in sorted(members.items(), key=lambda kv: kv[0]):
        classes = _members["classes"]
    namespaces = sorted(members_by_namespace)

    for module in namespaces:
        _members = members_by_namespace[module]
        classes = _members["classes_"]
        functions = _members["functions"]
        if not (classes or functions):
            continue
        section = f":mod:`{pkg}.{module}`"
        underline = "=" * (len(section) + 1)
        full_doc += f"""\
{section}
{'=' * (len(section) + 1)}
{underline}

.. automodule:: {pkg}.{module}
    :no-members:
@@ -52,7 +212,6 @@ def construct_doc(pkg: str, members: dict) -> str:
"""

        if classes:
            cstring = "\n    ".join(sorted(classes))
            full_doc += f"""\
Classes
--------------
@@ -60,13 +219,31 @@ Classes

.. autosummary::
    :toctree: {module}
    :template: class.rst

{cstring}

"""

            for class_ in classes:
                if not class_['is_public']:
                    continue

                if class_["kind"] == "TypedDict":
                    template = "typeddict.rst"
                elif class_["kind"] == "enum":
                    template = "enum.rst"
                elif class_["kind"] == "Pydantic":
                    template = "pydantic.rst"
                else:
                    template = "class.rst"

                full_doc += f"""\
    :template: {template}

    {class_["qualified_name"]}

"""

        if functions:
            fstring = "\n    ".join(sorted(functions))
            _functions = [f["qualified_name"] for f in functions if f["is_public"]]
            fstring = "\n    ".join(sorted(_functions))
            full_doc += f"""\
Functions
--------------
@@ -83,12 +260,15 @@ Functions


def main() -> None:
    lc_members = load_members(PKG_DIR)
    lc_doc = ".. _api_reference:\n\n" + construct_doc("langchain", lc_members)
    """Generate the reference.rst file for each package."""
    lc_members = _load_package_modules(PKG_DIR)
    lc_doc = ".. _api_reference:\n\n" + _construct_doc("langchain", lc_members)
    with open(WRITE_FILE, "w") as f:
        f.write(lc_doc)
    exp_members = load_members(EXP_DIR)
    exp_doc = ".. _experimental_api_reference:\n\n" + construct_doc("langchain_experimental", exp_members)
    exp_members = _load_package_modules(EXP_DIR)
    exp_doc = ".. _experimental_api_reference:\n\n" + _construct_doc(
        "langchain_experimental", exp_members
    )
    with open(EXP_WRITE_FILE, "w") as f:
        f.write(exp_doc)
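The rewritten script's kind detection hinges on one non-obvious check: `type(type_) == typing._TypedDictMeta`, which is why the diff carries a `# type: ignore`. A minimal standalone sketch (not part of the diff; `typing._TypedDictMeta` is a private CPython detail whose behavior can vary across Python versions) of why plain `inspect.isclass` cannot route TypedDicts to their own template:

```python
import inspect
import typing
from typing import TypedDict


class Point(TypedDict):
    x: int
    y: int


# A TypedDict looks like an ordinary class to inspect.isclass ...
print(inspect.isclass(Point))  # True
# ... and its runtime base is dict, so issubclass() does not single it out.
print(issubclass(Point, dict))  # True
# Only the (private) metaclass identifies it as a TypedDict.
print(type(Point) == typing._TypedDictMeta)  # True on recent CPython versions
```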
@@ -1,4 +1,5 @@
-e libs/langchain
-e libs/experimental
autodoc_pydantic==1.8.0
myst_parser
nbsphinx==0.8.9
@@ -10,4 +11,4 @@ sphinx-panels
toml
myst_nb
sphinx_copybutton
pydata-sphinx-theme==0.13.1
pydata-sphinx-theme==0.13.1
@@ -5,17 +5,6 @@

.. autoclass:: {{ objname }}

{% block methods %}
{% if methods %}
.. rubric:: {{ _('Methods') }}

.. autosummary::
{% for item in methods %}
   ~{{ name }}.{{ item }}
{%- endfor %}
{% endif %}
{% endblock %}

{% block attributes %}
{% if attributes %}
.. rubric:: {{ _('Attributes') }}

@@ -27,4 +16,21 @@
{% endif %}
{% endblock %}

{% block methods %}
{% if methods %}
.. rubric:: {{ _('Methods') }}

.. autosummary::
{% for item in methods %}
   ~{{ name }}.{{ item }}
{%- endfor %}

{% for item in methods %}
.. automethod:: {{ name }}.{{ item }}
{%- endfor %}

{% endif %}
{% endblock %}


.. example_links:: {{ objname }}
docs/api_reference/templates/enum.rst (new file, 14 lines)

@@ -0,0 +1,14 @@
:mod:`{{module}}`.{{objname}}
{{ underline }}==============

.. currentmodule:: {{ module }}

.. autoclass:: {{ objname }}

{% block attributes %}
{% for item in attributes %}
.. autoattribute:: {{ item }}
{% endfor %}
{% endblock %}

.. example_links:: {{ objname }}
docs/api_reference/templates/pydantic.rst (new file, 22 lines)

@@ -0,0 +1,22 @@
:mod:`{{module}}`.{{objname}}
{{ underline }}==============

.. currentmodule:: {{ module }}

.. autopydantic_model:: {{ objname }}
    :model-show-json: False
    :model-show-config-summary: False
    :model-show-validator-members: False
    :model-show-field-summary: False
    :field-signature-prefix: param
    :members:
    :undoc-members:
    :inherited-members:
    :member-order: groupwise
    :show-inheritance: True
    :special-members: __call__

{% block attributes %}
{% endblock %}

.. example_links:: {{ objname }}
docs/api_reference/templates/typeddict.rst (new file, 14 lines)

@@ -0,0 +1,14 @@
:mod:`{{module}}`.{{objname}}
{{ underline }}==============

.. currentmodule:: {{ module }}

.. autoclass:: {{ objname }}

{% block attributes %}
{% for item in attributes %}
.. autoattribute:: {{ item }}
{% endfor %}
{% endblock %}

.. example_links:: {{ objname }}
@@ -19,7 +19,7 @@
{% block htmltitle %}
<title>{{ title|striptags|e }}{{ titlesuffix }}</title>
{% endblock %}
<link rel="canonical" href="http://scikit-learn.org/stable/{{pagename}}.html" />
<link rel="canonical" href="https://api.python.langchain.com/en/latest/{{pagename}}.html" />

{% if favicon_url %}
<link rel="shortcut icon" href="{{ favicon_url|e }}"/>
@@ -6,17 +6,6 @@
{%- set top_container_cls = "sk-landing-container" %}
{%- endif %}

{% if theme_link_to_live_contributing_page|tobool %}
{# Link to development page for live builds #}
{%- set development_link = "https://scikit-learn.org/dev/developers/index.html" %}
{# Open on a new development page in new window/tab for live builds #}
{%- set development_attrs = 'target="_blank" rel="noopener noreferrer"' %}
{%- else %}
{%- set development_link = pathto('developers/index') %}
{%- set development_attrs = '' %}
{%- endif %}


<nav id="navbar" class="{{ nav_bar_class }} navbar navbar-expand-md navbar-light bg-light py-0">
<div class="container-fluid {{ top_container_cls }} px-0">
{%- if logo_url %}
@@ -3,10 +3,12 @@ sidebar_position: 0
---
# Prompts

The new way of programming models is through prompts.
A **prompt** refers to the input to the model.
This input is often constructed from multiple components.
LangChain provides several classes and functions to make constructing and working with prompts easy.
A prompt for a language model is a set of instructions or input provided by a user to
guide the model's response, helping it understand the context and generate relevant
and coherent language-based output, such as answering questions, completing sentences,
or engaging in a conversation.

- [Prompt templates](/docs/modules/model_io/prompts/prompt_templates/): Parametrize model inputs
LangChain provides several classes and functions to help construct and work with prompts.

- [Prompt templates](/docs/modules/model_io/prompts/prompt_templates/): Parametrized model inputs
- [Example selectors](/docs/modules/model_io/prompts/example_selectors/): Dynamically select examples to include in prompts
@@ -4,18 +4,15 @@ sidebar_position: 0

# Prompt templates

Language models take text as input - that text is commonly referred to as a prompt.
Typically this is not simply a hardcoded string but rather a combination of a template, some examples, and user input.
LangChain provides several classes and functions to make constructing and working with prompts easy.
Prompt templates are pre-defined recipes for generating prompts for language models.

## What is a prompt template?
A template may include instructions, few shot examples, and specific context and
questions appropriate for a given task.

A prompt template refers to a reproducible way to generate a prompt. It contains a text string ("the template"), that can take in a set of parameters from the end user and generates a prompt.
LangChain provides tooling to create and work with prompt templates.

A prompt template can contain:
- instructions to the language model,
- a set of few shot examples to help the language model generate a better response,
- a question to the language model.
LangChain strives to create model agnostic templates to make it easy to reuse
existing templates across different language models.

import GetStarted from "@snippets/modules/model_io/prompts/prompt_templates/get_started.mdx"
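The reproducible recipe the new text describes looks like this in practice — a minimal sketch using `PromptTemplate.from_template` from `langchain.prompts` (the joke template and its two variables are illustrative, not taken from the diff):

```python
from langchain.prompts import PromptTemplate

# The template string declares the parameters it takes.
prompt_template = PromptTemplate.from_template(
    "Tell me a {adjective} joke about {content}."
)

# format() fills the parameters in and returns the finished prompt string.
print(prompt_template.format(adjective="funny", content="chickens"))
# Tell me a funny joke about chickens.
```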
docs/docs_skeleton/package-lock.json (generated; 71 changed lines)

@@ -12,7 +12,7 @@
"@docusaurus/preset-classic": "2.4.0",
"@docusaurus/remark-plugin-npm2yarn": "^2.4.0",
"@mdx-js/react": "^1.6.22",
"@mendable/search": "^0.0.125",
"@mendable/search": "^0.0.137",
"clsx": "^1.2.1",
"json-loader": "^0.5.7",
"process": "^0.11.10",
@@ -3212,10 +3212,11 @@
}
},
"node_modules/@mendable/search": {
"version": "0.0.125",
"resolved": "https://registry.npmjs.org/@mendable/search/-/search-0.0.125.tgz",
"integrity": "sha512-Mb1J3zDhOyBZV9cXqJocSOBNYGpe8+LQDqd9n9laPWxosSJcSTUewqtlIbMerrYsScBsxskoSiWgRsc7xF5z0Q==",
"version": "0.0.137",
"resolved": "https://registry.npmjs.org/@mendable/search/-/search-0.0.137.tgz",
"integrity": "sha512-2J2fd5eqToK+mLzrSDA6NAr4F1kfql7QRiHpD7AUJJX0nqpvInhr/mMJKBCUSCv2z76UKCmF5wLuPSw+C90Qdg==",
"dependencies": {
"html-react-parser": "^4.2.0",
"posthog-js": "^1.45.1"
},
"peerDependencies": {
@@ -8332,6 +8333,33 @@
"safe-buffer": "~5.1.0"
}
},
"node_modules/html-dom-parser": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/html-dom-parser/-/html-dom-parser-4.0.0.tgz",
"integrity": "sha512-TUa3wIwi80f5NF8CVWzkopBVqVAtlawUzJoLwVLHns0XSJGynss4jiY0mTWpiDOsuyw+afP+ujjMgRh9CoZcXw==",
"dependencies": {
"domhandler": "5.0.3",
"htmlparser2": "9.0.0"
}
},
"node_modules/html-dom-parser/node_modules/htmlparser2": {
"version": "9.0.0",
"resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-9.0.0.tgz",
"integrity": "sha512-uxbSI98wmFT/G4P2zXx4OVx04qWUmyFPrD2/CNepa2Zo3GPNaCaaxElDgwUrwYWkK1nr9fft0Ya8dws8coDLLQ==",
"funding": [
"https://github.com/fb55/htmlparser2?sponsor=1",
{
"type": "github",
"url": "https://github.com/sponsors/fb55"
}
],
"dependencies": {
"domelementtype": "^2.3.0",
"domhandler": "^5.0.3",
"domutils": "^3.1.0",
"entities": "^4.5.0"
}
},
"node_modules/html-entities": {
"version": "2.4.0",
"resolved": "https://registry.npmjs.org/html-entities/-/html-entities-2.4.0.tgz",
@@ -8375,6 +8403,20 @@
"node": ">= 12"
}
},
"node_modules/html-react-parser": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/html-react-parser/-/html-react-parser-4.2.0.tgz",
"integrity": "sha512-gzU55AS+FI6qD7XaKe5BLuLFM2Xw0/LodfMWZlxV9uOHe7LCD5Lukx/EgYuBI3c0kLu0XlgFXnSzO0qUUn3Vrg==",
"dependencies": {
"domhandler": "5.0.3",
"html-dom-parser": "4.0.0",
"react-property": "2.0.0",
"style-to-js": "1.1.3"
},
"peerDependencies": {
"react": "0.14 || 15 || 16 || 17 || 18"
}
},
"node_modules/html-tags": {
"version": "3.3.1",
"resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.3.1.tgz",
@@ -11762,6 +11804,11 @@
"webpack": ">=4.41.1 || 5.x"
}
},
"node_modules/react-property": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/react-property/-/react-property-2.0.0.tgz",
"integrity": "sha512-kzmNjIgU32mO4mmH5+iUyrqlpFQhF8K2k7eZ4fdLSOPFrD1XgEuSBv9LDEgxRXTMBqMd8ppT0x6TIzqE5pdGdw=="
},
"node_modules/react-router": {
"version": "5.3.4",
"resolved": "https://registry.npmjs.org/react-router/-/react-router-5.3.4.tgz",
@@ -13127,6 +13174,22 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/style-to-js": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/style-to-js/-/style-to-js-1.1.3.tgz",
"integrity": "sha512-zKI5gN/zb7LS/Vm0eUwjmjrXWw8IMtyA8aPBJZdYiQTXj4+wQ3IucOLIOnF7zCHxvW8UhIGh/uZh/t9zEHXNTQ==",
"dependencies": {
"style-to-object": "0.4.1"
}
},
"node_modules/style-to-js/node_modules/style-to-object": {
"version": "0.4.1",
"resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-0.4.1.tgz",
"integrity": "sha512-HFpbb5gr2ypci7Qw+IOhnP2zOU7e77b+rzM+wTzXzfi1PrtBCX0E7Pk4wL4iTLnhzZ+JgEGAhX81ebTg/aYjQw==",
"dependencies": {
"inline-style-parser": "0.1.1"
}
},
"node_modules/style-to-object": {
"version": "0.3.0",
"resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-0.3.0.tgz",

@@ -23,7 +23,7 @@
"@docusaurus/preset-classic": "2.4.0",
"@docusaurus/remark-plugin-npm2yarn": "^2.4.0",
"@mdx-js/react": "^1.6.22",
"@mendable/search": "^0.0.125",
"@mendable/search": "^0.0.137",
"clsx": "^1.2.1",
"json-loader": "^0.5.7",
"process": "^0.11.10",
BIN docs/docs_skeleton/static/img/chat_use_case.png (new file, binary not shown; size after: 93 KiB)
BIN docs/docs_skeleton/static/img/chat_use_case_2.png (new file, binary not shown; size after: 102 KiB)
BIN docs/docs_skeleton/static/img/extraction.png (new file, binary not shown; size after: 125 KiB)
BIN docs/docs_skeleton/static/img/extraction_trace_function.png (new file, binary not shown; size after: 131 KiB)
BIN docs/docs_skeleton/static/img/extraction_trace_function_2.png (new file, binary not shown; size after: 211 KiB)
BIN docs/docs_skeleton/static/img/extraction_trace_joke.png (new file, binary not shown; size after: 132 KiB)
@@ -22,7 +22,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 1,
"id": "466b65b3",
"metadata": {},
"outputs": [],
@@ -171,9 +171,7 @@
"cell_type": "code",
"execution_count": 9,
"id": "decf7710",
"metadata": {
"scrolled": false
},
"metadata": {},
"outputs": [
{
"data": {
@@ -202,7 +200,7 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 10,
"id": "f799664d",
"metadata": {},
"outputs": [],
@@ -347,7 +345,7 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 12,
"id": "5d3d8ffe",
"metadata": {},
"outputs": [],
@@ -368,7 +366,7 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 2,
"id": "33be32af",
"metadata": {},
"outputs": [],
@@ -380,7 +378,7 @@
},
{
"cell_type": "code",
"execution_count": 15,
"execution_count": 3,
"id": "df3f3fa2",
"metadata": {},
"outputs": [],
@@ -424,9 +422,7 @@
"cell_type": "code",
"execution_count": 18,
"id": "f3040b0c",
"metadata": {
"scrolled": false
},
"metadata": {},
"outputs": [
{
"name": "stderr",
@@ -477,9 +473,7 @@
"cell_type": "code",
"execution_count": 20,
"id": "7ee8b2d4",
"metadata": {
"scrolled": false
},
"metadata": {},
"outputs": [
{
"name": "stderr",
@@ -515,7 +509,7 @@
},
{
"cell_type": "code",
"execution_count": 66,
"execution_count": 4,
"id": "3f30c348",
"metadata": {},
"outputs": [],
@@ -526,7 +520,7 @@
},
{
"cell_type": "code",
"execution_count": 21,
"execution_count": 5,
"id": "64ab1dbf",
"metadata": {},
"outputs": [],
@@ -544,7 +538,7 @@
},
{
"cell_type": "code",
"execution_count": 22,
"execution_count": 6,
"id": "7d628c97",
"metadata": {},
"outputs": [],
@@ -559,7 +553,7 @@
},
{
"cell_type": "code",
"execution_count": 68,
"execution_count": 7,
"id": "f60a5d0f",
"metadata": {},
"outputs": [],
@@ -572,7 +566,7 @@
},
{
"cell_type": "code",
"execution_count": 69,
"execution_count": 8,
"id": "7d007db6",
"metadata": {},
"outputs": [],
@@ -589,25 +583,29 @@
},
{
"cell_type": "code",
"execution_count": 70,
"execution_count": 16,
"id": "5c32cc89",
"metadata": {},
"outputs": [],
"source": [
"conversational_qa_chain = RunnableMap({\n",
"    \"standalone_question\": {\n",
"        \"question\": lambda x: x[\"question\"],\n",
"        \"chat_history\": lambda x: _format_chat_history(x['chat_history'])\n",
"    } | CONDENSE_QUESTION_PROMPT | ChatOpenAI(temperature=0) | StrOutputParser(),\n",
"}) | {\n",
"_inputs = RunnableMap(\n",
"    {\n",
"        \"standalone_question\": {\n",
"            \"question\": lambda x: x[\"question\"],\n",
"            \"chat_history\": lambda x: _format_chat_history(x['chat_history'])\n",
"        } | CONDENSE_QUESTION_PROMPT | ChatOpenAI(temperature=0) | StrOutputParser(),\n",
"    }\n",
")\n",
"_context = {\n",
"    \"context\": itemgetter(\"standalone_question\") | retriever | _combine_documents,\n",
"    \"question\": lambda x: x[\"standalone_question\"]\n",
"} | ANSWER_PROMPT | ChatOpenAI()"
"}\n",
"conversational_qa_chain = _inputs | _context | ANSWER_PROMPT | ChatOpenAI()"
]
},
{
"cell_type": "code",
"execution_count": 71,
"execution_count": 17,
"id": "135c8205",
"metadata": {},
"outputs": [
@@ -624,7 +622,7 @@
"AIMessage(content='Harrison was employed at Kensho.', additional_kwargs={}, example=False)"
]
},
"execution_count": 71,
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
@@ -638,7 +636,7 @@
},
{
"cell_type": "code",
"execution_count": 62,
"execution_count": 15,
"id": "424e7e7a",
"metadata": {},
"outputs": [
@@ -655,7 +653,7 @@
"AIMessage(content='Harrison worked at Kensho.', additional_kwargs={}, example=False)"
]
},
"execution_count": 62,
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
@@ -667,6 +665,149 @@
"})"
]
},
{
"cell_type": "markdown",
"id": "c5543183",
"metadata": {},
"source": [
"### With Memory and returning source documents\n",
"\n",
"This shows how to use memory with the above. For memory, we need to manage it outside of the chain. For returning the retrieved documents, we just need to pass them through all the way."
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "e31dd17c",
"metadata": {},
"outputs": [],
"source": [
"from langchain.memory import ConversationBufferMemory"
]
},
{
"cell_type": "code",
"execution_count": 44,
"id": "d4bffe94",
"metadata": {},
"outputs": [],
"source": [
"memory = ConversationBufferMemory(return_messages=True, output_key=\"answer\", input_key=\"question\")"
]
},
{
"cell_type": "code",
"execution_count": 45,
"id": "733be985",
"metadata": {},
"outputs": [],
"source": [
"# First we add a step to load memory\n",
"# This needs to be a RunnableMap because it's the first input\n",
"loaded_memory = RunnableMap(\n",
"    {\n",
"        \"question\": itemgetter(\"question\"),\n",
"        \"memory\": memory.load_memory_variables,\n",
"    }\n",
")\n",
"# Next we add a step to expand memory into the variables\n",
"expanded_memory = {\n",
"    \"question\": itemgetter(\"question\"),\n",
"    \"chat_history\": lambda x: x[\"memory\"][\"history\"]\n",
"}\n",
"\n",
"# Now we calculate the standalone question\n",
"standalone_question = {\n",
"    \"standalone_question\": {\n",
"        \"question\": lambda x: x[\"question\"],\n",
"        \"chat_history\": lambda x: _format_chat_history(x['chat_history'])\n",
"    } | CONDENSE_QUESTION_PROMPT | ChatOpenAI(temperature=0) | StrOutputParser(),\n",
"}\n",
"# Now we retrieve the documents\n",
"retrieved_documents = {\n",
"    \"docs\": itemgetter(\"standalone_question\") | retriever,\n",
"    \"question\": lambda x: x[\"standalone_question\"]\n",
"}\n",
"# Now we construct the inputs for the final prompt\n",
"final_inputs = {\n",
"    \"context\": lambda x: _combine_documents(x[\"docs\"]),\n",
"    \"question\": itemgetter(\"question\")\n",
"}\n",
"# And finally, we do the part that returns the answers\n",
"answer = {\n",
"    \"answer\": final_inputs | ANSWER_PROMPT | ChatOpenAI(),\n",
"    \"docs\": itemgetter(\"docs\"),\n",
"}\n",
"# And now we put it all together!\n",
"final_chain = loaded_memory | expanded_memory | standalone_question | retrieved_documents | answer"
]
},
{
"cell_type": "code",
"execution_count": 46,
"id": "806e390c",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Number of requested results 4 is greater than number of elements in index 1, updating n_results = 1\n"
]
},
{
"data": {
"text/plain": [
"{'answer': AIMessage(content='Harrison was employed at Kensho.', additional_kwargs={}, example=False),\n",
" 'docs': [Document(page_content='harrison worked at kensho', metadata={})]}"
]
},
"execution_count": 46,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"inputs = {\"question\": \"where did harrison work?\"}\n",
"result = final_chain.invoke(inputs)\n",
"result"
]
},
{
"cell_type": "code",
"execution_count": 47,
"id": "977399fd",
"metadata": {},
"outputs": [],
"source": [
"# Note that the memory does not save automatically\n",
"# This will be improved in the future\n",
"# For now you need to save it yourself\n",
"memory.save_context(inputs, {\"answer\": result[\"answer\"].content})"
]
},
{
"cell_type": "code",
"execution_count": 48,
"id": "f94f7de4",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'history': [HumanMessage(content='where did harrison work?', additional_kwargs={}, example=False),\n",
" AIMessage(content='Harrison was employed at Kensho.', additional_kwargs={}, example=False)]}"
]
},
"execution_count": 48,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"memory.load_memory_variables({})"
]
},
{
"cell_type": "markdown",
"id": "0f2bf8d3",
@@ -1391,13 +1532,122 @@
"response"
]
},
{
"cell_type": "markdown",
"id": "4927a727-b4c8-453c-8c83-bd87b4fcac14",
"metadata": {},
"source": [
"## Moderation\n",
"\n",
"This shows how to add in moderation (or other safeguards) around your LLM application."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "179d3c03",
"execution_count": 26,
"id": "4f5f6449-940a-4f5c-97c0-39b71c3e2a68",
"metadata": {},
"outputs": [],
"source": []
"source": [
"from langchain.chains import OpenAIModerationChain\n",
"from langchain.llms import OpenAI"
]
},
{
"cell_type": "code",
"execution_count": 35,
"id": "fcb8312b-7e7a-424f-a3ec-76738c9a9d21",
"metadata": {},
"outputs": [],
"source": [
"moderate = OpenAIModerationChain()"
]
},
{
"cell_type": "code",
"execution_count": 32,
"id": "b24b9148-f6b0-4091-8ea8-d3fb281bd950",
"metadata": {},
"outputs": [],
"source": [
"model = OpenAI()\n",
"prompt = ChatPromptTemplate.from_messages([\n",
"    (\"system\", \"repeat after me: {input}\")\n",
"])"
]
},
{
"cell_type": "code",
"execution_count": 33,
"id": "1c8ed87c-9ca6-4559-bf60-d40e94a0af08",
"metadata": {},
"outputs": [],
"source": [
"chain = prompt | model"
]
},
{
"cell_type": "code",
"execution_count": 34,
"id": "5256b9bd-381a-42b0-bfa8-7e6d18f853cb",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'\\n\\nYou are stupid.'"
]
},
"execution_count": 34,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke({\"input\": \"you are stupid\"})"
]
},
{
"cell_type": "code",
"execution_count": 36,
"id": "fe6e3b33-dc9a-49d5-b194-ba750c58a628",
"metadata": {},
"outputs": [],
"source": [
"moderated_chain = chain | moderate"
]
},
{
"cell_type": "code",
"execution_count": 37,
"id": "d8ba0cbd-c739-4d23-be9f-6ae092bd5ffb",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'input': '\\n\\nYou are stupid.',\n",
" 'output': \"Text was found that violates OpenAI's content policy.\"}"
]
},
"execution_count": 37,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"moderated_chain.invoke({\"input\": \"you are stupid\"})"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a0a85ba4-f782-47b8-b16f-8b7a61d6dab7",
"metadata": {},
"outputs": [],
"source": [
"## Conversational Retrieval With Memory"
]
}
],
"metadata": {
@@ -1416,7 +1666,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.10.1"
}
},
"nbformat": 4,
@@ -71,3 +71,6 @@ or any other local ENV management tool.

Currently `StreamlitCallbackHandler` is geared towards use with a LangChain Agent Executor. Support for additional agent types,
use directly with Chains, etc. will be added in the future.

You may also be interested in using
[StreamlitChatMessageHistory](/docs/integrations/memory/streamlit_chat_message_history) for LangChain.
@@ -0,0 +1,13 @@
<?xml version="1.0" encoding="UTF-8"?>

<opml version="1.0">
  <head>
    <title>Sample RSS feed subscriptions</title>
  </head>
  <body>
    <outline text="Tech" title="Tech">
      <outline type="rss" text="Engadget" title="Engadget" xmlUrl="http://www.engadget.com/rss-full.xml" htmlUrl="http://www.engadget.com"/>
      <outline type="rss" text="Ars Technica - All content" title="Ars Technica - All content" xmlUrl="http://feeds.arstechnica.com/arstechnica/index/" htmlUrl="https://arstechnica.com"/>
    </outline>
  </body>
</opml>
@@ -2,11 +2,14 @@
"cells": [
{
"cell_type": "markdown",
"id": "0b02f34c",
"id": "b0ed136e-6983-4893-ae1b-b75753af05f8",
"metadata": {},
"source": [
"# Google Drive Loader\n",
"This notebook covers how to retrieve documents from Google Drive.\n",
"# Google Drive\n",
"\n",
">[Google Drive](https://en.wikipedia.org/wiki/Google_Drive) is a file storage and synchronization service developed by Google.\n",
"\n",
"This notebook covers how to load documents from `Google Drive`. Currently, only `Google Docs` are supported.\n",
"\n",
"## Prerequisites\n",
"\n",
@@ -15,21 +18,12 @@
"1. [Authorize credentials for desktop app](https://developers.google.com/drive/api/quickstart/python#authorize_credentials_for_a_desktop_application)\n",
"1. `pip install --upgrade google-api-python-client google-auth-httplib2 google-auth-oauthlib`\n",
"\n",
"## Instructions for retrieving your Google Docs data\n",
"By default, the `GoogleDriveLoader` expects the `credentials.json` file to be `~/.credentials/credentials.json`, but this is configurable using the `GOOGLE_ACCOUNT_FILE` environment variable.\n",
"The location of `token.json` uses the same directory (or use the parameter `token_path`). Note that `token.json` will be created automatically the first time you use the loader.\n"
]
},
{
"cell_type": "markdown",
"id": "a03b9067",
"metadata": {},
"source": [
"You can obtain your folder and document id from the URL:\n",
"* Folder: https://drive.google.com/drive/u/0/folders/1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5 -> folder id is `\"1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5\"`\n",
"* Document: https://docs.google.com/document/d/1bfaMQ18_i56204VaQDVeAFpqEijJTgvurupdEDiaUQw/edit -> document id is `\"1bfaMQ18_i56204VaQDVeAFpqEijJTgvurupdEDiaUQw\"`\n",
"## 🧑 Instructions for ingesting your Google Docs data\n",
"By default, the `GoogleDriveLoader` expects the `credentials.json` file to be `~/.credentials/credentials.json`, but this is configurable using the `credentials_path` keyword argument. Same thing with `token.json` - `token_path`. Note that `token.json` will be created automatically the first time you use the loader.\n",
"\n",
"The special value `root` is for your personal home."
"`GoogleDriveLoader` can load from a list of Google Docs document ids or a folder id. You can obtain your folder and document id from the URL:\n",
"* Folder: https://drive.google.com/drive/u/0/folders/1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5 -> folder id is `\"1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5\"`\n",
"* Document: https://docs.google.com/document/d/1bfaMQ18_i56204VaQDVeAFpqEijJTgvurupdEDiaUQw/edit -> document id is `\"1bfaMQ18_i56204VaQDVeAFpqEijJTgvurupdEDiaUQw\"`"
]
},
{
@@ -39,23 +33,12 @@
"metadata": {},
"outputs": [],
"source": [
"#!pip install --upgrade google-api-python-client google-auth-httplib2 google-auth-oauthlib"
"!pip install --upgrade google-api-python-client google-auth-httplib2 google-auth-oauthlib"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9bcb6cb1",
"metadata": {},
"outputs": [],
"source": [
"folder_id='root'\n",
"#folder_id='1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5'"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"id": "878928a6-a5ae-4f74-b351-64e3b01733fe",
"metadata": {
"tags": []
@@ -67,7 +50,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"id": "2216c83f-68e4-4d2f-8ea2-5878fb18bbe7",
"metadata": {
"tags": []
@@ -75,215 +58,174 @@
"outputs": [],
"source": [
"loader = GoogleDriveLoader(\n",
"    folder_id=folder_id,\n",
"    folder_id=\"1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5\",\n",
"    # Optional: configure whether to recursively fetch files from subfolders. Defaults to False.\n",
"    recursive=False,\n",
"    num_results=2,  # Maximum number of files to load\n",
")"
]
},
{
"cell_type": "markdown",
"id": "de5be5d4",
"metadata": {},
"source": [
"By default, all files with these mime-types can be converted to `Document`:\n",
"- text/text\n",
"- text/plain\n",
"- text/html\n",
"- text/csv\n",
"- text/markdown\n",
"- image/png\n",
"- image/jpeg\n",
"- application/epub+zip\n",
"- application/pdf\n",
"- application/rtf\n",
"- application/vnd.google-apps.document (GDoc)\n",
"- application/vnd.google-apps.presentation (GSlide)\n",
"- application/vnd.google-apps.spreadsheet (GSheet)\n",
"- application/vnd.google.colaboratory (Notebook colab)\n",
"- application/vnd.openxmlformats-officedocument.presentationml.presentation (PPTX)\n",
"- application/vnd.openxmlformats-officedocument.wordprocessingml.document (DOCX)\n",
"\n",
"It's possible to update or customize this. See the documentation of `GDriveLoader`.\n",
"\n",
"But the corresponding packages must be installed."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1bca45c9",
"metadata": {},
"outputs": [],
"source": [
"!pip install unstructured"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 3,
"id": "8f3b6aa0-b45d-4e37-8c50-5bebe70fdb9d",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"for doc in loader.load():\n",
"    print(\"---\")\n",
"    print(doc.page_content.strip()[:60]+\"...\")"
"docs = loader.load()"
]
},
{
"cell_type": "markdown",
"id": "31170e71",
"id": "2721ba8a",
"metadata": {},
"source": [
"# Customize the search pattern\n",
"\n",
"All parameters compatible with the Google [`list()`](https://developers.google.com/drive/api/v3/reference/files/list)\n",
"API can be set.\n",
"\n",
"To specify the new pattern of the Google request, you can use a `PromptTemplate()`.\n",
"The variables for the prompt can be set with `kwargs` in the constructor.\n",
"Some pre-formatted requests are proposed (use `{query}`, `{folder_id}` and/or `{mime_type}`):\n",
"\n",
"You can customize the criteria used to select the files. A set of predefined filters is proposed:\n",
"| template                               | description                                                             |\n",
"| -------------------------------------- | ----------------------------------------------------------------------- |\n",
"| gdrive-all-in-folder                   | Return all compatible files from a `folder_id`                          |\n",
"| gdrive-query                           | Search `query` in all drives                                            |\n",
"| gdrive-by-name                         | Search file with name `query`                                           |\n",
"| gdrive-query-in-folder                 | Search `query` in `folder_id` (and sub-folders if `_recursive=true`)    |\n",
"| gdrive-mime-type                       | Search a specific `mime_type`                                           |\n",
"| gdrive-mime-type-in-folder             | Search a specific `mime_type` in `folder_id`                            |\n",
"| gdrive-query-with-mime-type            | Search `query` with a specific `mime_type`                              |\n",
"| gdrive-query-with-mime-type-and-folder | Search `query` with a specific `mime_type` and in `folder_id`           |\n"
"When you pass a `folder_id`, by default all files of type document, sheet and pdf are loaded. You can modify this behaviour by passing a `file_types` argument"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0a47175f",
"id": "2ff83b4c",
"metadata": {},
"outputs": [],
"source": [
"loader = GoogleDriveLoader(\n",
"    folder_id=folder_id,\n",
"    recursive=False,\n",
"    template=\"gdrive-query\",  # Default template to use\n",
"    query=\"machine learning\",\n",
"    num_results=2,  # Maximum number of files to load\n",
"    supportsAllDrives=False,  # GDrive `list()` parameter\n",
"    folder_id=\"1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5\",\n",
"    file_types=[\"document\", \"sheet\"]\n",
"    recursive=False\n",
")"
]
},
{
"cell_type": "markdown",
"id": "d6b80931",
"metadata": {},
"source": [
"## Passing in Optional File Loaders\n",
"\n",
"When processing files other than Google Docs and Google Sheets, it can be helpful to pass an optional file loader to `GoogleDriveLoader`. If you pass in a file loader, that file loader will be used on documents that do not have a Google Docs or Google Sheets MIME type. Here is an example of how to load an Excel document from Google Drive using a file loader."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "94207e39",
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import GoogleDriveLoader\n",
"from langchain.document_loaders import UnstructuredFileIOLoader"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "a15fbee0",
"metadata": {},
"outputs": [],
"source": [
"file_id = \"1x9WBtFPWMEAdjcJzPScRsjpjQvpSo_kz\"\n",
"loader = GoogleDriveLoader(\n",
"    file_ids=[file_id],\n",
"    file_loader_cls=UnstructuredFileIOLoader,\n",
"    file_loader_kwargs={\"mode\": \"elements\"},\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "100cf361",
"execution_count": 3,
"id": "98410bda",
"metadata": {},
"outputs": [],
"source": [
"for doc in loader.load():\n",
"    print(\"---\")\n",
"    print(doc.page_content.strip()[:60]+\"...\")"
]
},
{
"cell_type": "markdown",
"id": "74e6e3aa",
"metadata": {},
"source": [
"You can customize your pattern."
"docs = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dcf07ff7",
"execution_count": 4,
"id": "e3e72221",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Document(page_content='\\n \\n \\n Team\\n Location\\n Stanley Cups\\n \\n \\n Blues\\n STL\\n 1\\n \\n \\n Flyers\\n PHI\\n 2\\n \\n \\n Maple Leafs\\n TOR\\n 13\\n \\n \\n', metadata={'filetype': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'page_number': 1, 'page_name': 'Stanley Cups', 'text_as_html': '<table border=\"1\" class=\"dataframe\">\\n <tbody>\\n <tr>\\n <td>Team</td>\\n <td>Location</td>\\n <td>Stanley Cups</td>\\n </tr>\\n <tr>\\n <td>Blues</td>\\n <td>STL</td>\\n <td>1</td>\\n </tr>\\n <tr>\\n <td>Flyers</td>\\n <td>PHI</td>\\n <td>2</td>\\n </tr>\\n <tr>\\n <td>Maple Leafs</td>\\n <td>TOR</td>\\n <td>13</td>\\n </tr>\\n </tbody>\\n</table>', 'category': 'Table', 'source': 'https://drive.google.com/file/d/1aA6L2AR3g0CR-PW03HEZZo4NaVlKpaP7/view'})"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"docs[0]"
]
},
{
"cell_type": "markdown",
"id": "238cd06f",
"metadata": {},
"source": [
"You can also process a folder with a mix of files and Google Docs/Sheets using the following pattern:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "0e2d093f",
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts.prompt import PromptTemplate\n",
"folder_id = \"1asMOHY1BqBS84JcRbOag5LOJac74gpmD\"\n",
"loader = GoogleDriveLoader(\n",
"    folder_id=folder_id,\n",
"    recursive=False,\n",
"    template=PromptTemplate(\n",
"        input_variables=[\"query\", \"query_name\"],\n",
"        template=\"fullText contains '{query}' and name contains '{query_name}' and trashed=false\",\n",
"    ),  # Default template to use\n",
"    query=\"machine learning\",\n",
"    query_name=\"ML\",\n",
"    num_results=2,  # Maximum number of files to load\n",
")\n",
"for doc in loader.load():\n",
"    print(\"---\")\n",
"    print(doc.page_content.strip()[:60]+\"...\")"
"    file_loader_cls=UnstructuredFileIOLoader,\n",
"    file_loader_kwargs={\"mode\": \"elements\"},\n",
")"
]
},
{
"cell_type": "markdown",
"id": "8e404472",
"cell_type": "code",
"execution_count": 6,
"id": "b35ddcc6",
"metadata": {},
"outputs": [],
"source": [
"# Modes for GSlide and GSheet\n",
"\n",
"The parameter `mode` accepts different values:\n",
"- `\"document\"`: return the body of each document\n",
"- `\"snippets\"`: return the `description` of each file.\n",
"\n",
"\n",
"The parameter `gslide_mode` accepts different values:\n",
"- `\"single\"`: one document with `<PAGE BREAK>`\n",
"- `\"slide\"`: one document per slide\n",
"- `\"elements\"`: one document for each `elements`."
"docs = loader.load()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "3cc141e0",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Document(page_content='\\n \\n \\n Team\\n Location\\n Stanley Cups\\n \\n \\n Blues\\n STL\\n 1\\n \\n \\n Flyers\\n PHI\\n 2\\n \\n \\n Maple Leafs\\n TOR\\n 13\\n \\n \\n', metadata={'filetype': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'page_number': 1, 'page_name': 'Stanley Cups', 'text_as_html': '<table border=\"1\" class=\"dataframe\">\\n <tbody>\\n <tr>\\n <td>Team</td>\\n <td>Location</td>\\n <td>Stanley Cups</td>\\n </tr>\\n <tr>\\n <td>Blues</td>\\n <td>STL</td>\\n <td>1</td>\\n </tr>\\n <tr>\\n <td>Flyers</td>\\n <td>PHI</td>\\n <td>2</td>\\n </tr>\\n <tr>\\n <td>Maple Leafs</td>\\n <td>TOR</td>\\n <td>13</td>\\n </tr>\\n </tbody>\\n</table>', 'category': 'Table', 'source': 'https://drive.google.com/file/d/1aA6L2AR3g0CR-PW03HEZZo4NaVlKpaP7/view'})"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"docs[0]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b33d1a53",
"id": "e312268a",
"metadata": {},
"outputs": [],
"source": [
"loader = GoogleDriveLoader(\n",
"    template=\"gdrive-mime-type\",\n",
"    mime_type=\"application/vnd.google-apps.presentation\",  # Only GSlide files\n",
"    gslide_mode=\"slide\",\n",
"    num_results=2,  # Maximum number of files to load\n",
")\n",
"for doc in loader.load():\n",
"    print(\"---\")\n",
"    print(doc.page_content.strip()[:60]+\"...\")"
]
},
{
"cell_type": "markdown",
"id": "498f0451",
"metadata": {},
"source": [
"The parameter `gsheet_mode` accepts different values:\n",
"- `\"single\"`: generate one document per line\n",
"- `\"elements\"`: one document with a markdown array and `<PAGE BREAK>` tags."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "884c4ca6",
"metadata": {},
"outputs": [],
"source": [
"loader = GoogleDriveLoader(\n",
"    template=\"gdrive-mime-type\",\n",
"    mime_type=\"application/vnd.google-apps.spreadsheet\",  # Only GSheet files\n",
"    gsheet_mode=\"elements\",\n",
"    num_results=2,  # Maximum number of files to load\n",
")\n",
"for doc in loader.load():\n",
"    print(\"---\")\n",
"    print(doc.page_content.strip()[:60]+\"...\")"
]
"source": []
}
],
"metadata": {
@@ -302,7 +244,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
"version": "3.8.13"
}
},
"nbformat": 4,
144
docs/extras/integrations/document_loaders/nuclia.ipynb
Normal file
@@ -0,0 +1,144 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Nuclia Understanding API document loader\n",
|
||||
"\n",
|
||||
"[Nuclia](https://nuclia.com) automatically indexes your unstructured data from any internal and external source, providing optimized search results and generative answers. It can handle video and audio transcription, image content extraction, and document parsing.\n",
|
||||
"\n",
|
||||
"The Nuclia Understanding API supports the processing of unstructured data, including text, web pages, documents, and audio/video contents. It extracts all texts wherever they are (using speech-to-text or OCR when needed), it also extracts metadata, embedded files (like images in a PDF), and web links. If machine learning is enabled, it identifies entities, provides a summary of the content and generates embeddings for all the sentences.\n",
|
||||
"\n",
|
||||
"To use the Nuclia Understanding API, you need to have a Nuclia account. You can create one for free at [https://nuclia.cloud](https://nuclia.cloud), and then [create a NUA key](https://docs.nuclia.dev/docs/docs/using/understanding/intro)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#!pip install --upgrade protobuf\n",
|
||||
"#!pip install nucliadb-protos"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"NUCLIA_ZONE\"] = \"<YOUR_ZONE>\" # e.g. europe-1\n",
|
||||
"os.environ[\"NUCLIA_NUA_KEY\"] = \"<YOUR_API_KEY>\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To use the Nuclia document loader, you need to instantiate a `NucliaUnderstandingAPI` tool:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.tools.nuclia import NucliaUnderstandingAPI\n",
|
||||
"\n",
|
||||
"nua = NucliaUnderstandingAPI(enable_ml=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders.nuclia import NucliaLoader\n",
|
||||
"\n",
|
||||
"loader = NucliaLoader(\"./interview.mp4\", nua)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can now call the `load` the document in a loop until you get the document."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import time\n",
|
||||
"\n",
|
||||
"pending = True\n",
|
||||
"while pending:\n",
|
||||
" time.sleep(15)\n",
|
||||
" docs = loader.load()\n",
|
||||
" if len(docs) > 0:\n",
|
||||
" print(docs[0].page_content)\n",
|
||||
" print(docs[0].metadata)\n",
|
||||
" pending = False\n",
|
||||
" else:\n",
|
||||
" print(\"waiting...\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Retrieved information\n",
|
||||
"\n",
|
||||
"Nuclia returns the following information:\n",
|
||||
"\n",
|
||||
"- file metadata\n",
|
||||
"- extracted text\n",
|
||||
"- nested text (like text in an embedded image)\n",
|
||||
"- paragraphs and sentences splitting (defined by the position of their first and last characters, plus start time and end time for a video or audio file)\n",
|
||||
"- links\n",
|
||||
"- a thumbnail\n",
|
||||
"- embedded files\n",
|
||||
"\n",
|
||||
"Note:\n",
|
||||
"\n",
|
||||
" Generated files (thumbnail, extracted embedded files, etc.) are provided as a token. You can download them with the [`/processing/download` endpoint](https://docs.nuclia.dev/docs/api#operation/Download_binary_file_processing_download_get).\n",
|
||||
"\n",
|
||||
" Also at any level, if an attribute exceeds a certain size, it will be put in a downloadable file and will be replaced in the document by a file pointer. This will consist of `{\"file\": {\"uri\": \"JWT_TOKEN\"}}`. The rule is that if the size of the message is greater than 1000000 characters, the biggest parts will be moved to downloadable files. First, the compression process will target vectors. If that is not enough, it will target large field metadata, and finally it will target extracted text.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "langchain",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.5"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -9,7 +9,7 @@
|
||||
"\n",
|
||||
"We may want to process load all URLs under a root directory.\n",
|
||||
"\n",
|
||||
"For example, let's look at the [LangChain JS documentation](https://js.langchain.com/docs/).\n",
|
||||
"For example, let's look at the [Python 3.9 Document](https://docs.python.org/3.9/).\n",
|
||||
"\n",
|
||||
"This has many interesting child pages that we may want to read in bulk.\n",
|
||||
"\n",
|
||||
@@ -19,13 +19,28 @@
|
||||
" \n",
|
||||
"We do this using the `RecursiveUrlLoader`.\n",
|
||||
"\n",
|
||||
"This also gives us the flexibility to exclude some children (e.g., the `api` directory with > 800 child pages)."
|
||||
"This also gives us the flexibility to exclude some children, customize the extractor, and more."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1be8094f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Parameters\n",
|
||||
"- url: str, the target url to crawl.\n",
|
||||
"- exclude_dirs: Optional[str], webpage directories to exclude.\n",
|
||||
"- use_async: Optional[bool], wether to use async requests, using async requests is usually faster in large tasks. However, async will disable the lazy loading feature(the function still works, but it is not lazy). By default, it is set to False.\n",
|
||||
"- extractor: Optional[Callable[[str], str]], a function to extract the text of the document from the webpage, by default it returns the page as it is. It is recommended to use tools like goose3 and beautifulsoup to extract the text. By default, it just returns the page as it is.\n",
|
||||
"- max_depth: Optional[int] = None, the maximum depth to crawl. By default, it is set to 2. If you need to crawl the whole website, set it to a number that is large enough would simply do the job.\n",
|
||||
"- timeout: Optional[int] = None, the timeout for each request, in the unit of seconds. By default, it is set to 10.\n",
|
||||
"- prevent_outside: Optional[bool] = None, whether to prevent crawling outside the root url. By default, it is set to True."
|
||||
]
|
||||
},
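{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# A minimal sketch exercising the parameters listed above; the URL and the\n",
"# excluded directory are illustrative.\n",
"from langchain.document_loaders.recursive_url_loader import RecursiveUrlLoader\n",
"\n",
"loader = RecursiveUrlLoader(\n",
"    url=\"https://docs.python.org/3.9/\",\n",
"    max_depth=2,  # crawl at most two levels below the root\n",
"    exclude_dirs=[\"https://docs.python.org/3.9/c-api/\"],  # skip this subtree\n",
"    use_async=False,  # keep lazy loading available\n",
"    timeout=10,  # seconds per request\n",
"    prevent_outside=True,  # never crawl outside the root url\n",
")\n",
"docs = loader.load()"
]
},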
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "2e3532b2",
|
||||
"execution_count": null,
|
||||
"id": "23c18539",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -42,13 +57,15 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "d69e5620",
|
||||
"execution_count": null,
|
||||
"id": "55394afe",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"url = \"https://js.langchain.com/docs/modules/memory/examples/\"\n",
|
||||
"loader = RecursiveUrlLoader(url=url)\n",
|
||||
"from bs4 import BeautifulSoup as Soup\n",
|
||||
"\n",
|
||||
"url = \"https://docs.python.org/3.9/\"\n",
|
||||
"loader = RecursiveUrlLoader(url=url, max_depth=2, extractor=lambda x: Soup(x, \"html.parser\").text)\n",
|
||||
"docs = loader.load()"
|
||||
]
|
||||
},
|
||||
@@ -61,7 +78,7 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"12"
|
||||
"'\\n\\n\\n\\n\\nPython Frequently Asked Questions — Python 3.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
@@ -70,19 +87,21 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"len(docs)"
|
||||
"docs[0].page_content[:50]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "89355b7c",
|
||||
"id": "13bd7e16",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\n\\n\\n\\n\\nBuffer Window Memory | 🦜️🔗 Langchain\\n\\n\\n\\n\\n\\nSki'"
|
||||
"{'source': 'https://docs.python.org/3.9/library/index.html',\n",
|
||||
" 'title': 'The Python Standard Library — Python 3.9.17 documentation',\n",
|
||||
" 'language': None}"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
@@ -91,137 +110,48 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs[0].page_content[:50]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "13bd7e16",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'source': 'https://js.langchain.com/docs/modules/memory/examples/buffer_window_memory',\n",
|
||||
" 'title': 'Buffer Window Memory | 🦜️🔗 Langchain',\n",
|
||||
" 'description': 'BufferWindowMemory keeps track of the back-and-forths in conversation, and then uses a window of size k to surface the last k back-and-forths to use as memory.',\n",
|
||||
" 'language': 'en'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs[0].metadata"
|
||||
"docs[-1].metadata"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "40fc13ef",
|
||||
"id": "5866e5a6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now, let's try a more extensive example, the `docs` root dir.\n",
|
||||
"\n",
|
||||
"We will skip everything under `api`.\n",
|
||||
"\n",
|
||||
"For this, we can `lazy_load` each page as we crawl the tree, using `WebBaseLoader` to load each as we go."
|
||||
"However, since it's hard to perform a perfect filter, you may still see some irrelevant results in the results. You can perform a filter on the returned documents by yourself, if it's needed. Most of the time, the returned results are good enough."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4ec8ecef",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Testing on LangChain docs."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "5c938b9f",
|
||||
"execution_count": 2,
|
||||
"id": "349b5598",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"url = \"https://js.langchain.com/docs/\"\n",
|
||||
"exclude_dirs = [\"https://js.langchain.com/docs/api/\"]\n",
|
||||
"loader = RecursiveUrlLoader(url=url, exclude_dirs=exclude_dirs)\n",
|
||||
"# Lazy load each\n",
|
||||
"docs = [print(doc) or doc for doc in loader.lazy_load()]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "30ff61d3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Load all pages\n",
|
||||
"docs = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "457e30f3",
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"188"
|
||||
"8"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"url = \"https://js.langchain.com/docs/modules/memory/integrations/\"\n",
|
||||
"loader = RecursiveUrlLoader(url=url)\n",
|
||||
"docs = loader.load()\n",
|
||||
"len(docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "bca80b4a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\n\\n\\n\\n\\nAgent Simulations | 🦜️🔗 Langchain\\n\\n\\n\\n\\n\\nSkip t'"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs[0].page_content[:50]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "df97cf22",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'source': 'https://js.langchain.com/docs/use_cases/agent_simulations/',\n",
|
||||
" 'title': 'Agent Simulations | 🦜️🔗 Langchain',\n",
|
||||
" 'description': 'Agent simulations involve taking multiple agents and having them interact with each other.',\n",
|
||||
" 'language': 'en'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs[0].metadata"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
311
docs/extras/integrations/document_loaders/rss.ipynb
Normal file
@@ -0,0 +1,311 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2dfc4698",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# RSS Feeds\n",
|
||||
"\n",
|
||||
"This covers how to load HTML news articles from a list of RSS feed URLs into a document format that we can use downstream."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e7c2cd52-c1f7-4a06-8539-b0117da91fba",
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install feedparser newspaper3k listparser"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 32,
|
||||
"id": "16c3699e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import RSSFeedLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 33,
|
||||
"id": "836fbac1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"urls = [\"https://news.ycombinator.com/rss\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "33089aba-ff74-4d00-8f40-9449c29587cc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Pass in urls to load them into Documents"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "00f46fda",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = RSSFeedLoader(urls=urls)\n",
|
||||
"data = loader.load()\n",
|
||||
"print(len(data))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 35,
|
||||
"id": "b447468cc42266d0",
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"(next Rich)\n",
|
||||
"\n",
|
||||
"04 August 2023\n",
|
||||
"\n",
|
||||
"Rich Hickey\n",
|
||||
"\n",
|
||||
"It is with a mixture of heartache and optimism that I announce today my (long planned) retirement from commercial software development, and my employment at Nubank. It’s been thrilling to see Clojure and Datomic successfully applied at scale.\n",
|
||||
"\n",
|
||||
"I look forward to continuing to lead ongoing work maintaining and enhancing Clojure with Alex, Stu, Fogus and many others, as an independent developer once again. We have many useful things planned for 1.12 and beyond. The community remains friendly, mature and productive, and is taking Clojure into many interesting new domains.\n",
|
||||
"\n",
|
||||
"I want to highlight and thank Nubank for their ongoing sponsorship of Alex, Fogus and the core team, as well as the Clojure community at large.\n",
|
||||
"\n",
|
||||
"Stu will continue to lead the development of Datomic at Nubank, where the Datomic team grows and thrives. I’m particularly excited to see where the new free availability of Datomic will lead.\n",
|
||||
"\n",
|
||||
"My time with Cognitect remains the highlight of my career. I have learned from absolutely everyone on our team, and am forever grateful to all for our interactions. There are too many people to thank here, but I must extend my sincerest appreciation and love to Stu and Justin for (repeatedly) taking a risk on me and my ideas, and for being the best of partners and friends, at all times fully embodying the notion of integrity. And of course to Alex Miller - who possesses in abundance many skills I lack, and without whose indomitable spirit, positivity and friendship Clojure would not have become what it did.\n",
|
||||
"\n",
|
||||
"I have made many friends through Clojure and Cognitect, and I hope to nurture those friendships moving forward.\n",
|
||||
"\n",
|
||||
"Retirement returns me to the freedom and independence I had when originally developing Clojure. The journey continues!\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(data[0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c36d3b0d329faf2a",
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"You can pass arguments to the NewsURLLoader which it uses to load articles."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 36,
|
||||
"id": "5fdada62470d3019",
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Error fetching or processing https://twitter.com/andrewmccalip/status/1687405505604734978, exception: You must `parse()` an article first!\n",
|
||||
"Error processing entry https://twitter.com/andrewmccalip/status/1687405505604734978, exception: list index out of range\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"13\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"loader = RSSFeedLoader(urls=urls, nlp=True)\n",
|
||||
"data = loader.load()\n",
|
||||
"print(len(data))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 37,
|
||||
"id": "11d71963f7735c1d",
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"['nubank',\n",
|
||||
" 'alex',\n",
|
||||
" 'stu',\n",
|
||||
" 'taking',\n",
|
||||
" 'team',\n",
|
||||
" 'remains',\n",
|
||||
" 'rich',\n",
|
||||
" 'clojure',\n",
|
||||
" 'thank',\n",
|
||||
" 'planned',\n",
|
||||
" 'datomic']"
|
||||
]
|
||||
},
|
||||
"execution_count": 37,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"data[0].metadata['keywords']"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 38,
|
||||
"id": "9fb64ba0e8780966",
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'It’s been thrilling to see Clojure and Datomic successfully applied at scale.\\nI look forward to continuing to lead ongoing work maintaining and enhancing Clojure with Alex, Stu, Fogus and many others, as an independent developer once again.\\nThe community remains friendly, mature and productive, and is taking Clojure into many interesting new domains.\\nI want to highlight and thank Nubank for their ongoing sponsorship of Alex, Fogus and the core team, as well as the Clojure community at large.\\nStu will continue to lead the development of Datomic at Nubank, where the Datomic team grows and thrives.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 38,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"data[0].metadata['summary']"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "98ac26c488315bff",
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"You can also use an OPML file such as a Feedly export. Pass in either a URL or the OPML contents."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 39,
|
||||
"id": "8b6f07ae526a897c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Error fetching http://www.engadget.com/rss-full.xml, exception: Error fetching http://www.engadget.com/rss-full.xml, exception: document declared as us-ascii, but parsed as utf-8\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"20\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"with open(\"example_data/sample_rss_feeds.opml\", \"r\") as f:\n",
|
||||
" loader = RSSFeedLoader(opml=f.read())\n",
|
||||
"data = loader.load()\n",
|
||||
"print(len(data))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 40,
|
||||
"id": "b68a26b3",
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'The electric vehicle startup Fisker made a splash in Huntington Beach last night, showing off a range of new EVs it plans to build alongside the Fisker Ocean, which is slowly beginning deliveries in Europe and the US. With shades of Lotus circa 2010, it seems there\\'s something for most tastes, with a powerful four-door GT, a versatile pickup truck, and an affordable electric city car.\\n\\n\"We want the world to know that we have big plans and intend to move into several different segments, redefining each with our unique blend of design, innovation, and sustainability,\" said CEO Henrik Fisker.\\n\\nStarting with the cheapest, the Fisker PEAR—a cutesy acronym for \"Personal Electric Automotive Revolution\"—is said to use 35 percent fewer parts than other small EVs. Although it\\'s a smaller car, the PEAR seats six thanks to front and rear bench seats. Oh, and it has a frunk, which the company is calling the \"froot,\" something that will satisfy some British English speakers like Ars\\' friend and motoring journalist Jonny Smith.\\n\\nBut most exciting is the price—starting at $29,900 and scheduled for 2025. Fisker plans to contract with Foxconn to build the PEAR in Lordstown, Ohio, meaning it would be eligible for federal tax incentives.\\n\\nAdvertisement\\n\\nThe Fisker Alaska is the company\\'s pickup truck, built on a modified version of the platform used by the Ocean. It has an extendable cargo bed, which can be as little as 4.5 feet (1,371 mm) or as much as 9.2 feet (2,804 mm) long. Fisker claims it will be both the lightest EV pickup on sale and the most sustainable pickup truck in the world. Range will be an estimated 230–240 miles (370–386 km).\\n\\nThis, too, is slated for 2025, and also at a relatively affordable price, starting at $45,400. Fisker hopes to build this car in North America as well, although it isn\\'t saying where that might take place.\\n\\nFinally, there\\'s the Ronin, a four-door GT that bears more than a passing resemblance to the Fisker Karma, Henrik Fisker\\'s 2012 creation. There\\'s no price for this one, but Fisker says its all-wheel drive powertrain will boast 1,000 hp (745 kW) and will hit 60 mph from a standing start in two seconds—just about as fast as modern tires will allow. Expect a massive battery in this one, as Fisker says it\\'s targeting a 600-mile (956 km) range.\\n\\n\"Innovation and sustainability, along with design, are our three brand values. By 2027, we intend to produce the world’s first climate-neutral vehicle, and as our customers reinvent their relationships with mobility, we want to be a leader in software-defined transportation,\" Fisker said.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 40,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"data[0].page_content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d5a0cbe8-18a6-4af2-b447-7abb8b734451",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv",
|
||||
"language": "python",
|
||||
"name": "poetry-venv"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -0,0 +1,103 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Nuclia Understanding API document transformer\n",
|
||||
"\n",
|
||||
"[Nuclia](https://nuclia.com) automatically indexes your unstructured data from any internal and external source, providing optimized search results and generative answers. It can handle video and audio transcription, image content extraction, and document parsing.\n",
|
||||
"\n",
|
||||
"The Nuclia Understanding API document transformer splits text into paragraphs and sentences, identifies entities, provides a summary of the text and generates embeddings for all the sentences.\n",
|
||||
"\n",
|
||||
"To use the Nuclia Understanding API, you need to have a Nuclia account. You can create one for free at [https://nuclia.cloud](https://nuclia.cloud), and then [create a NUA key](https://docs.nuclia.dev/docs/docs/using/understanding/intro).\n",
|
||||
"\n",
|
||||
"from langchain.document_transformers.nuclia_text_transform import NucliaTextTransformer"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#!pip install --upgrade protobuf\n",
|
||||
"#!pip install nucliadb-protos"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"NUCLIA_ZONE\"] = \"<YOUR_ZONE>\" # e.g. europe-1\n",
|
||||
"os.environ[\"NUCLIA_NUA_KEY\"] = \"<YOUR_API_KEY>\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To use the Nuclia document transformer, you need to instantiate a `NucliaUnderstandingAPI` tool with `enable_ml` set to `True`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.tools.nuclia import NucliaUnderstandingAPI\n",
|
||||
"\n",
|
||||
"nua = NucliaUnderstandingAPI(enable_ml=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The Nuclia document transformer must be called in async mode, so you need to use the `atransform_documents` method:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import asyncio\n",
|
||||
"\n",
|
||||
"from langchain.document_transformers.nuclia_text_transform import NucliaTextTransformer\n",
|
||||
"from langchain.schema.document import Document\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"async def process():\n",
|
||||
" documents = [\n",
|
||||
" Document(page_content=\"<TEXT 1>\", metadata={}),\n",
|
||||
" Document(page_content=\"<TEXT 2>\", metadata={}),\n",
|
||||
" Document(page_content=\"<TEXT 3>\", metadata={}),\n",
|
||||
" ]\n",
|
||||
" nuclia_transformer = NucliaTextTransformer(nua)\n",
|
||||
" transformed_documents = await nuclia_transformer.atransform_documents(documents)\n",
|
||||
" print(transformed_documents)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"asyncio.run(process())"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -7,7 +7,7 @@
|
||||
"source": [
|
||||
"# Fireworks\n",
|
||||
"\n",
|
||||
">[Fireworks](https://www.fireworks.ai/) is an AI startup focused on accelerating product development on generative AI by creating an innovative AI experiment and production platform. \n",
|
||||
">[Fireworks](https://app.fireworks.ai/) accelerates product development on generative AI by creating an innovative AI experiment and production platform. \n",
|
||||
"\n",
|
||||
"This example goes over how to use LangChain to interact with `Fireworks` models."
|
||||
]
|
||||
@@ -37,7 +37,7 @@
|
||||
"\n",
|
||||
"Contact Fireworks AI for the an API Key to access our models\n",
|
||||
"\n",
|
||||
"Set up your model using a model id. If the model is not set, the default model is fireworks-llama-v2-13b-chat."
|
||||
"Set up your model using a model id. If the model is not set, the default model is fireworks-llama-v2-7b-chat."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -49,7 +49,7 @@
|
||||
"source": [
|
||||
"# Initialize a Fireworks LLM\n",
|
||||
"os.environ['FIREWORKS_API_KEY'] = \"\" #change this to your own API KEY\n",
|
||||
"llm = Fireworks(model_id=\"fireworks-llama-v2-13b-chat\")"
|
||||
"llm = Fireworks(model_id=\"accounts/fireworks/models/fireworks-llama-v2-13b-chat\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -73,11 +73,10 @@
|
||||
"You can use the LLMs to call the model for specified prompt(s). \n",
|
||||
"\n",
|
||||
"Current Specified Models: \n",
|
||||
"* fireworks-falcon-7b, fireworks-falcon-40b-w8a16\n",
|
||||
"* fireworks-guanaco-30b, fireworks-guanaco-33b-w8a16\n",
|
||||
"* fireworks-llama-7b, fireworks-llama-13b, fireworks-llama-30b-w8a16\n",
|
||||
"* fireworks-llama-v2-13b, fireworks-llama-v2-13b-chat, fireworks-llama-v2-13b-w8a16, fireworks-llama-v2-13b-chat-w8a16\n",
|
||||
"* fireworks-llama-v2-7b, fireworks-llama-v2-7b-chat, fireworks-llama-v2-7b-w8a16, fireworks-llama-v2-7b-chat-w8a16"
|
||||
"* accounts/fireworks/models/fireworks-falcon-7b, accounts/fireworks/models/fireworks-falcon-40b-w8a16\n",
|
||||
"* accounts/fireworks/models/fireworks-starcoder-1b-w8a16-1gpu, accounts/fireworks/models/fireworks-starcoder-3b-w8a16-1gpu, accounts/fireworks/models/fireworks-starcoder-7b-w8a16-1gpu, accounts/fireworks/models/fireworks-starcoder-16b-w8a16 \n",
|
||||
"* accounts/fireworks/models/fireworks-llama-v2-13b, accounts/fireworks/models/fireworks-llama-v2-13b-chat, accounts/fireworks/models/fireworks-llama-v2-13b-w8a16, accounts/fireworks/models/fireworks-llama-v2-13b-chat-w8a16\n",
|
||||
"* accounts/fireworks/models/fireworks-llama-v2-7b, accounts/fireworks/models/fireworks-llama-v2-7b-chat, accounts/fireworks/models/fireworks-llama-v2-7b-w8a16, accounts/fireworks/models/fireworks-llama-v2-7b-chat-w8a16, accounts/fireworks/models/fireworks-llama-v2-70b-chat-4gpu"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -147,7 +146,7 @@
|
||||
],
|
||||
"source": [
|
||||
"#setting parameters: model_id, temperature, max_tokens, top_p\n",
|
||||
"llm = Fireworks(model_id=\"fireworks-llama-v2-13b-chat\", temperature=0.7, max_tokens=15, top_p=1.0)\n",
|
||||
"llm = Fireworks(model_id=\"accounts/fireworks/models/fireworks-llama-v2-13b-chat\", temperature=0.7, max_tokens=15, top_p=1.0)\n",
|
||||
"print(llm(\"What's the weather like in Kansas City in December?\"))"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -4,12 +4,12 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Llama-cpp\n",
|
||||
"# Llama.cpp\n",
|
||||
"\n",
|
||||
"[llama-cpp](https://github.com/abetlen/llama-cpp-python) is a Python binding for [llama.cpp](https://github.com/ggerganov/llama.cpp). \n",
|
||||
"[llama-cpp-python](https://github.com/abetlen/llama-cpp-python) is a Python binding for [llama.cpp](https://github.com/ggerganov/llama.cpp). \n",
|
||||
"It supports [several LLMs](https://github.com/ggerganov/llama.cpp).\n",
|
||||
"\n",
|
||||
"This notebook goes over how to run `llama-cpp` within LangChain."
|
||||
"This notebook goes over how to run `llama-cpp-python` within LangChain."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -18,7 +18,7 @@
|
||||
"source": [
|
||||
"## Installation\n",
|
||||
"\n",
|
||||
"There is a bunch of options how to install the llama-cpp package: \n",
|
||||
"There are different options on how to install the llama-cpp package: \n",
|
||||
"- only CPU usage\n",
|
||||
"- CPU + GPU (using one of many BLAS backends)\n",
|
||||
"- Metal GPU (MacOS with Apple Silicon Chip) \n",
|
||||
@@ -61,7 +61,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**IMPORTANT**: If you have already installed a cpu only version of the package, you need to reinstall it from scratch: consider the following command: "
|
||||
"**IMPORTANT**: If you have already installed the CPU only version of the package, you need to reinstall it from scratch. Consider the following command: "
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -79,7 +79,7 @@
|
||||
"source": [
|
||||
"### Installation with Metal\n",
|
||||
"\n",
|
||||
"`lama.cpp` supports Apple silicon first-class citizen - optimized via ARM NEON, Accelerate and Metal frameworks. Use the `FORCE_CMAKE=1` environment variable to force the use of cmake and install the pip package for the Metal support ([source](https://github.com/abetlen/llama-cpp-python/blob/main/docs/install/macos.md)).\n",
|
||||
"`llama.cpp` supports Apple silicon first-class citizen - optimized via ARM NEON, Accelerate and Metal frameworks. Use the `FORCE_CMAKE=1` environment variable to force the use of cmake and install the pip package for the Metal support ([source](https://github.com/abetlen/llama-cpp-python/blob/main/docs/install/macos.md)).\n",
|
||||
"\n",
|
||||
"Example installation with Metal Support:"
|
||||
]
|
||||
@@ -143,7 +143,7 @@
|
||||
"\n",
|
||||
"#### Compiling and installing\n",
|
||||
"\n",
|
||||
"In the same command prompt (anaconda prompt) you set the variables, you can cd into `llama-cpp-python` directory and run the following commands.\n",
|
||||
"In the same command prompt (anaconda prompt) you set the variables, you can `cd` into `llama-cpp-python` directory and run the following commands.\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"python setup.py clean\n",
|
||||
@@ -164,7 +164,9 @@
|
||||
"source": [
|
||||
"Make sure you are following all instructions to [install all necessary model files](https://github.com/ggerganov/llama.cpp).\n",
|
||||
"\n",
|
||||
"You don't need an `API_TOKEN`!"
|
||||
"You don't need an `API_TOKEN` as you will run the LLM locally.\n",
|
||||
"\n",
|
||||
"It is worth understanding which models are suitable to be used on the desired machine."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -227,7 +229,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"`Llama-v2`"
|
||||
"Example using a LLaMA 2 7B model"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -304,7 +306,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"`Llama-v1`"
|
||||
"Example using a LLaMA v1 model"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -381,7 +383,7 @@
|
||||
"source": [
|
||||
"### GPU\n",
|
||||
"\n",
|
||||
"If the installation with BLAS backend was correct, you will see an `BLAS = 1` indicator in model properties.\n",
|
||||
"If the installation with BLAS backend was correct, you will see a `BLAS = 1` indicator in model properties.\n",
|
||||
"\n",
|
||||
"Two of the most important parameters for use with GPU are:\n",
|
||||
"\n",
|
||||
@@ -473,22 +475,15 @@
|
||||
"llm_chain.run(question)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Metal\n",
|
||||
"\n",
|
||||
"If the installation with Metal was correct, you will see an `NEON = 1` indicator in model properties.\n",
|
||||
"If the installation with Metal was correct, you will see a `NEON = 1` indicator in model properties.\n",
|
||||
"\n",
|
||||
"Two of the most important parameters for use with GPU are:\n",
|
||||
"Two of the most important GPU parameters are:\n",
|
||||
"\n",
|
||||
"- `n_gpu_layers` - determines how many layers of the model are offloaded to your Metal GPU, in the most case, set it to `1` is enough for Metal\n",
|
||||
"- `n_batch` - how many tokens are processed in parallel, default is 8, set to bigger number.\n",
|
||||
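{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# A minimal sketch of Metal offloading with the two parameters above;\n",
"# the model path is illustrative - point it at your own local model file.\n",
"from langchain.llms import LlamaCpp\n",
"\n",
"llm = LlamaCpp(\n",
"    model_path=\"./ggml-model-q4_0.bin\",\n",
"    n_gpu_layers=1,  # offload model layers to the Metal GPU\n",
"    n_batch=512,  # tokens processed in parallel; larger than the default of 8\n",
")"
]
},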
@@ -522,7 +517,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The rest are almost same as GPU, the console log will show the following log to indicate the Metal was enable properly.\n",
|
||||
"The console log will show the following log to indicate Metal was enable properly.\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"ggml_metal_init: allocating\n",
|
||||
@@ -530,7 +525,9 @@
|
||||
"...\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"You also could check the `Activity Monitor` by watching the % GPU of the process, the % CPU will drop dramatically after turn on `n_gpu_layers=1`. Also for the first time call LLM, the performance might be slow due to the model compilation in Metal GPU."
|
||||
"You also could check `Activity Monitor` by watching the GPU usage of the process, the CPU usage will drop dramatically after turn on `n_gpu_layers=1`. \n",
|
||||
"\n",
|
||||
"For the first call to the LLM, the performance may be slow due to the model compilation in Metal GPU."
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
196
docs/extras/integrations/llms/vllm.ipynb
Normal file
@@ -0,0 +1,196 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "499c3142-2033-437d-a60a-731988ac6074",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# vLLM\n",
|
||||
"\n",
|
||||
"[vLLM](https://vllm.readthedocs.io/en/latest/index.html) is a fast and easy-to-use library for LLM inference and serving, offering:\n",
|
||||
"* State-of-the-art serving throughput \n",
|
||||
"* Efficient management of attention key and value memory with PagedAttention\n",
|
||||
"* Continuous batching of incoming requests\n",
|
||||
"* Optimized CUDA kernels\n",
|
||||
"\n",
|
||||
"This notebooks goes over how to use a LLM with langchain and vLLM.\n",
|
||||
"\n",
|
||||
"To use, you should have the `vllm` python package installed."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "8a3f2666-5c75-4797-967a-7915a247bf33",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#!pip install vllm -q"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "84e350f7-21f6-455b-b1f0-8b0116a2fd49",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO 08-06 11:37:33 llm_engine.py:70] Initializing an LLM engine with config: model='mosaicml/mpt-7b', tokenizer='mosaicml/mpt-7b', tokenizer_mode=auto, trust_remote_code=True, dtype=torch.bfloat16, use_dummy_weights=False, download_dir=None, use_np_weights=False, tensor_parallel_size=1, seed=0)\n",
|
||||
"INFO 08-06 11:37:41 llm_engine.py:196] # GPU blocks: 861, # CPU blocks: 512\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Processed prompts: 100%|██████████| 1/1 [00:00<00:00, 2.00it/s]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"What is the capital of France ? The capital of France is Paris.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.llms import VLLM\n",
|
||||
"\n",
|
||||
"llm = VLLM(model=\"mosaicml/mpt-7b\",\n",
|
||||
" trust_remote_code=True, # mandatory for hf models\n",
|
||||
" max_new_tokens=128,\n",
|
||||
" top_k=10,\n",
|
||||
" top_p=0.95,\n",
|
||||
" temperature=0.8,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(llm(\"What is the capital of France ?\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "94a3b41d-8329-4f8f-94f9-453d7f132214",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Integrate the model in an LLMChain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "5605b7a1-fa63-49c1-934d-8b4ef8d71dd5",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Processed prompts: 100%|██████████| 1/1 [00:01<00:00, 1.34s/it]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"1. The first Pokemon game was released in 1996.\n",
|
||||
"2. The president was Bill Clinton.\n",
|
||||
"3. Clinton was president from 1993 to 2001.\n",
|
||||
"4. The answer is Clinton.\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain import PromptTemplate, LLMChain\n",
|
||||
"\n",
|
||||
"template = \"\"\"Question: {question}\n",
|
||||
"\n",
|
||||
"Answer: Let's think step by step.\"\"\"\n",
|
||||
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
|
||||
"\n",
|
||||
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
|
||||
"\n",
|
||||
"question = \"Who was the US president in the year the first Pokemon game was released?\"\n",
|
||||
"\n",
|
||||
"print(llm_chain.run(question))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "56826aba-d08b-4838-8bfa-ca96e463b25d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Distributed Inference\n",
|
||||
"\n",
|
||||
"vLLM supports distributed tensor-parallel inference and serving. \n",
|
||||
"\n",
|
||||
"To run multi-GPU inference with the LLM class, set the `tensor_parallel_size` argument to the number of GPUs you want to use. For example, to run inference on 4 GPUs"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f8c25c35-47b5-459d-9985-3cf546e9ac16",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.llms import VLLM\n",
|
||||
"\n",
|
||||
"llm = VLLM(model=\"mosaicml/mpt-30b\",\n",
|
||||
" tensor_parallel_size=4,\n",
|
||||
" trust_remote_code=True, # mandatory for hf models\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"llm(\"What is the future of AI?\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "conda_pytorch_p310",
|
||||
"language": "python",
|
||||
"name": "conda_pytorch_p310"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.10"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -7,8 +7,17 @@
|
||||
"source": [
|
||||
"# Streamlit Chat Message History\n",
|
||||
"\n",
|
||||
"This notebook goes over how to use Streamlit to store chat message history. Note, StreamlitChatMessageHistory only works when run in a Streamlit app. For more on Streamlit check out their\n",
|
||||
"[getting started documentation](https://docs.streamlit.io/library/get-started)."
|
||||
"This notebook goes over how to store and use chat message history in a Streamlit app. StreamlitChatMessageHistory will store messages in\n",
|
||||
"[Streamlit session state](https://docs.streamlit.io/library/api-reference/session-state)\n",
|
||||
"at the specified `key=`. The default key is `\"langchain_messages\"`.\n",
|
||||
"\n",
|
||||
"- Note, StreamlitChatMessageHistory only works when run in a Streamlit app.\n",
|
||||
"- You may also be interested in [StreamlitCallbackHandler](/docs/integrations/callbacks/streamlit) for LangChain.\n",
|
||||
"- For more on Streamlit check out their\n",
|
||||
"[getting started documentation](https://docs.streamlit.io/library/get-started).\n",
|
||||
"\n",
|
||||
"You can see the [full app example running here](https://langchain-st-memory.streamlit.app/), and more examples in\n",
|
||||
"[github.com/langchain-ai/streamlit-agent](https://github.com/langchain-ai/streamlit-agent)."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -20,7 +29,7 @@
|
||||
"source": [
|
||||
"from langchain.memory import StreamlitChatMessageHistory\n",
|
||||
"\n",
|
||||
"history = StreamlitChatMessageHistory(\"foo\")\n",
|
||||
"history = StreamlitChatMessageHistory(key=\"chat_messages\")\n",
|
||||
"\n",
|
||||
"history.add_user_message(\"hi!\")\n",
|
||||
"history.add_ai_message(\"whats up?\")"
|
||||
@@ -35,6 +44,90 @@
|
||||
"source": [
|
||||
"history.messages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b60dc735",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can integrate StreamlitChatMessageHistory into ConversationBufferMemory and chains or agents as usual. The history will be persisted across re-runs of the Streamlit app within a given user session. A given StreamlitChatMessageHistory will NOT be persisted or shared across user sessions."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "42ab5bf3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"from langchain.memory import ConversationBufferMemory\n",
|
||||
"from langchain.memory.chat_message_histories import StreamlitChatMessageHistory\n",
|
||||
"\n",
|
||||
"# Optionally, specify your own session_state key for storing messages\n",
|
||||
"msgs = StreamlitChatMessageHistory(key=\"special_app_key\")\n",
|
||||
"\n",
|
||||
"memory = ConversationBufferMemory(memory_key=\"history\", chat_memory=msgs)\n",
|
||||
"if len(msgs.messages) == 0:\n",
|
||||
" msgs.add_ai_message(\"How can I help you?\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a29252de",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"template = \"\"\"You are an AI chatbot having a conversation with a human.\n",
|
||||
"\n",
|
||||
"{history}\n",
|
||||
"Human: {human_input}\n",
|
||||
"AI: \"\"\"\n",
|
||||
"prompt = PromptTemplate(input_variables=[\"history\", \"human_input\"], template=template)\n",
|
||||
"\n",
|
||||
"# Add the memory to an LLMChain as usual\n",
|
||||
"llm_chain = LLMChain(llm=OpenAI(), prompt=prompt, memory=memory)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7cd99b4b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Conversational Streamlit apps will often re-draw each previous chat message on every re-run. This is easy to do by iterating through `StreamlitChatMessageHistory.messages`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3bdb637b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import streamlit as st\n",
|
||||
"\n",
|
||||
"for msg in msgs.messages:\n",
|
||||
" st.chat_message(msg.type).write(msg.content)\n",
|
||||
"\n",
|
||||
"if prompt := st.chat_input():\n",
|
||||
" st.chat_message(\"human\").write(prompt)\n",
|
||||
"\n",
|
||||
" # As usual, new messages are added to StreamlitChatMessageHistory when the Chain is called.\n",
|
||||
" response = llm_chain.run(prompt)\n",
|
||||
" st.chat_message(\"ai\").write(response)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7adaf3d6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**[View the final app](https://langchain-st-memory.streamlit.app/).**"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
@@ -19,4 +19,4 @@ llm = Fireworks(model="fireworks-llama-v2-13b-chat", max_tokens=256, temperature
|
||||
llm("Name 3 sports.")
|
||||
```
|
||||
|
||||
For a more detailed walkthrough, see [here](/docs/extras/modules/model_io/models/llms/integrations/Fireworks.ipynb).
|
||||
For a more detailed walkthrough, see [here](/docs/integrations/llms/Fireworks).
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
>[Google Drive](https://en.wikipedia.org/wiki/Google_Drive) is a file storage and synchronization service developed by Google.
|
||||
|
||||
All Google Drive API is supported, with integration with Google Doc, Google Sheet and Google Slide.
|
||||
Currently, only `Google Docs` are supported.
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
@@ -20,22 +20,3 @@ See a [usage example and authorizing instructions](/docs/integrations/document_l
|
||||
```python
|
||||
from langchain.document_loaders import GoogleDriveLoader
|
||||
```
|
||||
|
||||
## Retriever
|
||||
|
||||
See a [usage example and authorizing instructions](/docs/modules/data_connection/retrievers/integrations/google_drive.html).
|
||||
|
||||
```python
|
||||
from langchain.retrievers import GoogleDriveRetriever
|
||||
```
|
||||
|
||||
## Tools
|
||||
|
||||
See a [usage example and authorizing instructions](/docs/modules/agents/tools/integrations/google_drive.html).
|
||||
|
||||
```python
|
||||
from langchain.tools import GoogleDriveSearchTool
|
||||
from langchain.utilities import GoogleDriveAPIWrapper
|
||||
```
|
||||
|
||||
|
||||
|
||||
@@ -1,279 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b0ed136e-6983-4893-ae1b-b75753af05f8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Google Drive Retriever\n",
|
||||
"This notebook covers how to retrieve documents from Google Drive.\n",
|
||||
"\n",
|
||||
"## Prerequisites\n",
|
||||
"\n",
|
||||
"1. Create a Google Cloud project or use an existing project\n",
|
||||
"1. Enable the [Google Drive API](https://console.cloud.google.com/flows/enableapi?apiid=drive.googleapis.com)\n",
|
||||
"1. [Authorize credentials for desktop app](https://developers.google.com/drive/api/quickstart/python#authorize_credentials_for_a_desktop_application)\n",
|
||||
"1. `pip install --upgrade google-api-python-client google-auth-httplib2 google-auth-oauthlib`\n",
|
||||
"\n",
|
||||
"## Instructions for retrieving your Google Docs data\n",
|
||||
"By default, the `GoogleDriveRetriever` expects the `credentials.json` file to be `~/.credentials/credentials.json`, but this is configurable using the `GOOGLE_ACCOUNT_FILE` environment variable. \n",
|
||||
"The location of `token.json` use the same directory (or use the parameter `token_path`). Note that `token.json` will be created automatically the first time you use the retriever.\n",
|
||||
"\n",
|
||||
"`GoogleDriveRetriever` can retrieve a selection of files with some requests. \n",
|
||||
"\n",
|
||||
"By default, If you use a `folder_id`, all the files inside this folder can be retrieved to `Document`.\n"
|
||||
]
|
||||
},
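{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# A minimal sketch of pointing the retriever at a non-default credentials\n",
"# file via the environment variable named above; the path is illustrative.\n",
"import os\n",
"\n",
"os.environ[\"GOOGLE_ACCOUNT_FILE\"] = \"~/.credentials/credentials.json\""
]
},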
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "35b94a93-97de-4af8-9cca-de9ffb7930c3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can obtain your folder and document id from the URL:\n",
|
||||
"* Folder: https://drive.google.com/drive/u/0/folders/1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5 -> folder id is `\"1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5\"`\n",
|
||||
"* Document: https://docs.google.com/document/d/1bfaMQ18_i56204VaQDVeAFpqEijJTgvurupdEDiaUQw/edit -> document id is `\"1bfaMQ18_i56204VaQDVeAFpqEijJTgvurupdEDiaUQw\"`\n",
|
||||
"\n",
|
||||
"The special value `root` is for your personal home."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9c9665c9-a023-4078-9d95-e43021cecb6f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#!pip install --upgrade google-api-python-client google-auth-httplib2 google-auth-oauthlib"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "878928a6-a5ae-4f74-b351-64e3b01733fe",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-09T10:45:59.438650905Z",
|
||||
"start_time": "2023-05-09T10:45:57.955900302Z"
|
||||
},
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.retrievers import GoogleDriveRetriever"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "755907c2-145d-4f0f-9b15-07a628a2d2d2",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-09T10:45:59.442890834Z",
|
||||
"start_time": "2023-05-09T10:45:59.440941528Z"
|
||||
},
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"folder_id=\"root\"\n",
|
||||
"#folder_id='1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5'"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2216c83f-68e4-4d2f-8ea2-5878fb18bbe7",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-09T10:45:59.795842403Z",
|
||||
"start_time": "2023-05-09T10:45:59.445262457Z"
|
||||
},
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever = GoogleDriveRetriever(\n",
|
||||
" num_results=2,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "fa339ca0-f478-440c-ba80-0e5f41a19ce1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"By default, all files with these mime-type can be converted to `Document`.\n",
|
||||
"- text/text\n",
|
||||
"- text/plain\n",
|
||||
"- text/html\n",
|
||||
"- text/csv\n",
|
||||
"- text/markdown\n",
|
||||
"- image/png\n",
|
||||
"- image/jpeg\n",
|
||||
"- application/epub+zip\n",
|
||||
"- application/pdf\n",
|
||||
"- application/rtf\n",
|
||||
"- application/vnd.google-apps.document (GDoc)\n",
|
||||
"- application/vnd.google-apps.presentation (GSlide)\n",
|
||||
"- application/vnd.google-apps.spreadsheet (GSheet)\n",
|
||||
"- application/vnd.google.colaboratory (Notebook colab)\n",
|
||||
"- application/vnd.openxmlformats-officedocument.presentationml.presentation (PPTX)\n",
|
||||
"- application/vnd.openxmlformats-officedocument.wordprocessingml.document (DOCX)\n",
|
||||
"\n",
|
||||
"It's possible to update or customize this. See the documentation of `GDriveRetriever`.\n",
|
||||
"\n",
|
||||
"But, the corresponding packages must be installed."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9dadec48",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#!pip install unstructured"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8f3b6aa0-b45d-4e37-8c50-5bebe70fdb9d",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-09T10:46:00.990310466Z",
|
||||
"start_time": "2023-05-09T10:45:59.798774595Z"
|
||||
},
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever.get_relevant_documents(\"machine learning\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8ff33817-8619-4897-8742-2216b9934d2a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can customize the criteria to select the files. A set of predefined filter are proposed:\n",
|
||||
"| template | description |\n",
|
||||
"| -------------------------------------- | --------------------------------------------------------------------- |\n",
|
||||
"| gdrive-all-in-folder | Return all compatible files from a `folder_id` |\n",
|
||||
"| gdrive-query | Search `query` in all drives |\n",
|
||||
"| gdrive-by-name | Search file with name `query`) |\n",
|
||||
"| gdrive-query-in-folder | Search `query` in `folder_id` (and sub-folders in `_recursive=true`) |\n",
|
||||
"| gdrive-mime-type | Search a specific `mime_type` |\n",
|
||||
"| gdrive-mime-type-in-folder | Search a specific `mime_type` in `folder_id` |\n",
|
||||
"| gdrive-query-with-mime-type | Search `query` with a specific `mime_type` |\n",
|
||||
"| gdrive-query-with-mime-type-and-folder | Search `query` with a specific `mime_type` and in `folder_id` |"
|
||||
]
|
||||
},
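For example, to scope a full-text search to one folder, a sketch combining the `gdrive-query-in-folder` template with the `folder_id` defined earlier (mirroring the parameters used elsewhere in this notebook) might look like:

```python
retriever = GoogleDriveRetriever(
    template="gdrive-query-in-folder",  # Search the body of documents under folder_id
    folder_id=folder_id,
    num_results=2,
)
retriever.get_relevant_documents("machine learning")
```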
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9977c712-9659-4959-b508-f59cc7d49d44",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever = GoogleDriveRetriever(\n",
|
||||
" template=\"gdrive-query\", # Search everywhere\n",
|
||||
" num_results=2, # But take only 2 documents\n",
|
||||
")\n",
|
||||
"for doc in retriever.get_relevant_documents(\"machine learning\"):\n",
|
||||
" print(\"---\")\n",
|
||||
" print(doc.page_content.strip()[:60]+\"...\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a5a0f3ef-26fb-4a5c-85f0-5aba90b682b1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Else, you can customize the prompt with a specialized `PromptTemplate`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b0bbebde-0487-4d20-9d77-8070e4f0e0d6",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain import PromptTemplate\n",
|
||||
"retriever = GoogleDriveRetriever(\n",
|
||||
" template=PromptTemplate(input_variables=['query'],\n",
|
||||
" # See https://developers.google.com/drive/api/guides/search-files\n",
|
||||
" template=\"(fullText contains '{query}') \"\n",
|
||||
" \"and mimeType='application/vnd.google-apps.document' \"\n",
|
||||
" \"and modifiedTime > '2000-01-01T00:00:00' \"\n",
|
||||
" \"and trashed=false\"),\n",
|
||||
" num_results=2,\n",
|
||||
" # See https://developers.google.com/drive/api/v3/reference/files/list\n",
|
||||
" includeItemsFromAllDrives=False,\n",
|
||||
" supportsAllDrives=False,\n",
|
||||
")\n",
|
||||
"for doc in retriever.get_relevant_documents(\"machine learning\"):\n",
|
||||
" print(f\"{doc.metadata['name']}:\")\n",
|
||||
" print(\"---\")\n",
|
||||
" print(doc.page_content.strip()[:60]+\"...\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9b6fed29-1666-452e-b677-401613270388",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Use GDrive 'description' metadata\n",
|
||||
"Each Google Drive has a `description` field in metadata (see the *details of a file*).\n",
|
||||
"Use the `snippets` mode to return the description of selected files.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "342dbe12-ed83-40f4-8957-0cc8c4609542",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever = GoogleDriveRetriever(\n",
|
||||
" template='gdrive-mime-type-in-folder',\n",
|
||||
" folder_id=folder_id,\n",
|
||||
" mime_type='application/vnd.google-apps.document', # Only Google Docs\n",
|
||||
" num_results=2,\n",
|
||||
" mode='snippets',\n",
|
||||
" includeItemsFromAllDrives=False,\n",
|
||||
" supportsAllDrives=False,\n",
|
||||
")\n",
|
||||
"retriever.get_relevant_documents(\"machine learning\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
222
docs/extras/integrations/retrievers/re_phrase.ipynb
Normal file
@@ -0,0 +1,222 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e8624be2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# RePhraseQueryRetriever\n",
|
||||
"\n",
|
||||
"Simple retriever that applies an LLM between the user input and the query pass the to retriever.\n",
|
||||
"\n",
|
||||
"It can be used to pre-process the user input in any way.\n",
|
||||
"\n",
|
||||
"The default prompt used in the `from_llm` classmethod:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"DEFAULT_TEMPLATE = \"\"\"You are an assistant tasked with taking a natural language \\\n",
|
||||
"query from a user and converting it into a query for a vectorstore. \\\n",
|
||||
"In this process, you strip out information that is not relevant for \\\n",
|
||||
"the retrieval task. Here is the user query: {question}\"\"\"\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Create a vectorstore."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "1bfa6834",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import WebBaseLoader\n",
|
||||
"\n",
|
||||
"loader = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")\n",
|
||||
"data = loader.load()\n",
|
||||
"\n",
|
||||
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
|
||||
"\n",
|
||||
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)\n",
|
||||
"all_splits = text_splitter.split_documents(data)\n",
|
||||
"\n",
|
||||
"from langchain.vectorstores import Chroma\n",
|
||||
"from langchain.embeddings import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"vectorstore = Chroma.from_documents(documents=all_splits, embedding=OpenAIEmbeddings())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "d0b51556",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import logging\n",
|
||||
"\n",
|
||||
"logging.basicConfig()\n",
|
||||
"logging.getLogger(\"langchain.retrievers.re_phraser\").setLevel(logging.INFO)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "20e1e787",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.retrievers import RePhraseQueryRetriever"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "88c0a972",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using the default prompt"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "503994bd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = ChatOpenAI(temperature=0)\n",
|
||||
"retriever_from_llm = RePhraseQueryRetriever.from_llm(\n",
|
||||
" retriever=vectorstore.as_retriever(), llm=llm\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "8d17ecc9",
|
||||
"metadata": {
|
||||
"scrolled": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:langchain.retrievers.re_phraser:Re-phrased question: The user query can be converted into a query for a vectorstore as follows:\n",
|
||||
"\n",
|
||||
"\"approaches to Task Decomposition\"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs = retriever_from_llm.get_relevant_documents(\n",
|
||||
" \"Hi I'm Lance. What are the approaches to Task Decomposition?\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "76d54f1a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:langchain.retrievers.re_phraser:Re-phrased question: Query for vectorstore: \"Types of Memory\"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs = retriever_from_llm.get_relevant_documents(\n",
|
||||
" \"I live in San Francisco. What are the Types of Memory?\"\n",
|
||||
")"
|
||||
]
|
||||
},
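Because the re-phrasing retriever is a standard LangChain retriever, it can feed any downstream chain. A minimal sketch (not from the original notebook) wiring it into a `RetrievalQA` chain:

```python
from langchain.chains import RetrievalQA

qa = RetrievalQA.from_chain_type(
    llm=llm,                       # the ChatOpenAI instance created above
    chain_type="stuff",
    retriever=retriever_from_llm,  # re-phrases the question before retrieval
)
qa.run("What are the approaches to Task Decomposition?")
```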
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0513a6e2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Supply a prompt"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "410d6a64",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain import LLMChain\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"\n",
|
||||
"QUERY_PROMPT = PromptTemplate(\n",
|
||||
" input_variables=[\"question\"],\n",
|
||||
" template=\"\"\"You are an assistant tasked with taking a natural languge query from a user\n",
|
||||
" and converting it into a query for a vectorstore. In the process, strip out all \n",
|
||||
" information that is not relevant for the retrieval task and return a new, simplified\n",
|
||||
" question for vectorstore retrieval. The new user query should be in pirate speech.\n",
|
||||
" Here is the user query: {question} \"\"\",\n",
|
||||
")\n",
|
||||
"llm = ChatOpenAI(temperature=0)\n",
|
||||
"llm_chain = LLMChain(llm=llm, prompt=QUERY_PROMPT)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "2dbffdd3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever_from_llm_chain = RePhraseQueryRetriever(\n",
|
||||
" retriever=vectorstore.as_retriever(), llm_chain=llm_chain\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "103b4be3",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:langchain.retrievers.re_phraser:Re-phrased question: Ahoy matey! What be Maximum Inner Product Search, ye scurvy dog?\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs = retriever_from_llm_chain.get_relevant_documents(\n",
|
||||
" \"Hi I'm Lance. What is Maximum Inner Product Search?\"\n",
|
||||
")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -16,7 +16,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 2,
|
||||
"id": "a801b57c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -26,7 +26,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 3,
|
||||
"id": "393ac030",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -46,7 +46,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 4,
|
||||
"id": "98b1c017",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -133,6 +133,68 @@
|
||||
"source": [
|
||||
"result"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "363f3c04",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Save and load\n",
|
||||
"\n",
|
||||
"You can easily save and load this retriever, making it handy for local development!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "10c90d03",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever.save_local(\"testing.pkl\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "fb3b153c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever_copy = TFIDFRetriever.load_local(\"testing.pkl\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "c03ff3c7",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='foo', metadata={}),\n",
|
||||
" Document(page_content='foo bar', metadata={}),\n",
|
||||
" Document(page_content='hello', metadata={}),\n",
|
||||
" Document(page_content='world', metadata={})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"retriever_copy.get_relevant_documents(\"foo\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2d7c5728",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -151,7 +213,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
"version": "3.10.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 1,
|
||||
"id": "8a920a89",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -30,7 +30,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 2,
|
||||
"id": "f2d04da3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -41,17 +41,17 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 3,
|
||||
"id": "e6ecde96",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"embeddings = AlephAlphaAsymmetricSemanticEmbedding()"
|
||||
"embeddings = AlephAlphaAsymmetricSemanticEmbedding(normalize=True, compress_to_size=128)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 4,
|
||||
"id": "90e68411",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -61,7 +61,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 5,
|
||||
"id": "55903233",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -79,7 +79,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 6,
|
||||
"id": "eabb763a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -89,7 +89,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 7,
|
||||
"id": "0ad799f7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -99,17 +99,17 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 8,
|
||||
"id": "af86dc10",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"embeddings = AlephAlphaSymmetricSemanticEmbedding()"
|
||||
"embeddings = AlephAlphaSymmetricSemanticEmbedding(normalize=True, compress_to_size=128)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 9,
|
||||
"id": "d292536f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -119,7 +119,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 10,
|
||||
"id": "c704a7cf",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -130,7 +130,7 @@
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "33492471",
|
||||
"id": "5d999f8f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
@@ -152,7 +152,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.9.13"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
|
||||
@@ -1,215 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Google Drive tool\n",
|
||||
"\n",
|
||||
"This notebook walks through connecting a LangChain to the Google Drive API.\n",
|
||||
"\n",
|
||||
"## Prerequisites\n",
|
||||
"\n",
|
||||
"1. Create a Google Cloud project or use an existing project\n",
|
||||
"1. Enable the [Google Drive API](https://console.cloud.google.com/flows/enableapi?apiid=drive.googleapis.com)\n",
|
||||
"1. [Authorize credentials for desktop app](https://developers.google.com/drive/api/quickstart/python#authorize_credentials_for_a_desktop_application)\n",
|
||||
"1. `pip install --upgrade google-api-python-client google-auth-httplib2 google-auth-oauthlib`\n",
|
||||
"\n",
|
||||
"## Instructions for retrieving your Google Docs data\n",
|
||||
"By default, the `GoogleDriveTools` and `GoogleDriveWrapper` expects the `credentials.json` file to be `~/.credentials/credentials.json`, but this is configurable using the `GOOGLE_ACCOUNT_FILE` environment variable. \n",
|
||||
"The location of `token.json` use the same directory (or use the parameter `token_path`). Note that `token.json` will be created automatically the first time you use the tool.\n",
|
||||
"\n",
|
||||
"`GoogleDriveSearchTool` can retrieve a selection of files with some requests. \n",
|
||||
"\n",
|
||||
"By default, If you use a `folder_id`, all the files inside this folder can be retrieved to `Document`, if the name match the query.\n"
|
||||
]
|
||||
},
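For instance, to point the tool at a credentials file outside the default location, you can set the environment variable before creating the tool (the path below is illustrative):

```python
import os

os.environ["GOOGLE_ACCOUNT_FILE"] = "/path/to/credentials.json"  # illustrative path
```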
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#!pip install --upgrade google-api-python-client google-auth-httplib2 google-auth-oauthlib"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can obtain your folder and document id from the URL:\n",
|
||||
"* Folder: https://drive.google.com/drive/u/0/folders/1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5 -> folder id is `\"1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5\"`\n",
|
||||
"* Document: https://docs.google.com/document/d/1bfaMQ18_i56204VaQDVeAFpqEijJTgvurupdEDiaUQw/edit -> document id is `\"1bfaMQ18_i56204VaQDVeAFpqEijJTgvurupdEDiaUQw\"`\n",
|
||||
"\n",
|
||||
"The special value `root` is for your personal home."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"folder_id=\"root\"\n",
|
||||
"#folder_id='1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5'"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"By default, all files with these mime-type can be converted to `Document`.\n",
|
||||
"- text/text\n",
|
||||
"- text/plain\n",
|
||||
"- text/html\n",
|
||||
"- text/csv\n",
|
||||
"- text/markdown\n",
|
||||
"- image/png\n",
|
||||
"- image/jpeg\n",
|
||||
"- application/epub+zip\n",
|
||||
"- application/pdf\n",
|
||||
"- application/rtf\n",
|
||||
"- application/vnd.google-apps.document (GDoc)\n",
|
||||
"- application/vnd.google-apps.presentation (GSlide)\n",
|
||||
"- application/vnd.google-apps.spreadsheet (GSheet)\n",
|
||||
"- application/vnd.google.colaboratory (Notebook colab)\n",
|
||||
"- application/vnd.openxmlformats-officedocument.presentationml.presentation (PPTX)\n",
|
||||
"- application/vnd.openxmlformats-officedocument.wordprocessingml.document (DOCX)\n",
|
||||
"\n",
|
||||
"It's possible to update or customize this. See the documentation of `GoogleDriveAPIWrapper`.\n",
|
||||
"\n",
|
||||
"But, the corresponding packages must installed."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#!pip install unstructured"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.utilities.google_drive import GoogleDriveAPIWrapper\n",
|
||||
"from langchain.tools.google_drive.tool import GoogleDriveSearchTool\n",
|
||||
"\n",
|
||||
"# By default, search only in the filename.\n",
|
||||
"tool = GoogleDriveSearchTool(\n",
|
||||
" api_wrapper=GoogleDriveAPIWrapper(\n",
|
||||
" folder_id=folder_id,\n",
|
||||
" num_results=2,\n",
|
||||
" template=\"gdrive-query-in-folder\", # Search in the body of documents\n",
|
||||
" )\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import logging\n",
|
||||
"logging.basicConfig(level=logging.INFO)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tool.run(\"machine learning\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tool.description"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import load_tools\n",
|
||||
"tools = load_tools([\"google-drive-search\"],\n",
|
||||
" folder_id=folder_id,\n",
|
||||
" template=\"gdrive-query-in-folder\",\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use within an Agent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain import OpenAI\n",
|
||||
"from langchain.agents import initialize_agent, AgentType\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"agent = initialize_agent(\n",
|
||||
" tools=tools,\n",
|
||||
" llm=llm,\n",
|
||||
" agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent.run(\n",
|
||||
" \"Search in google drive, who is 'Yann LeCun' ?\"\n",
|
||||
")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -5,7 +5,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Multion Toolkit\n",
|
||||
"# MultiOn Toolkit\n",
|
||||
"\n",
|
||||
"This notebook walks you through connecting LangChain to the MultiOn Client in your browser\n",
|
||||
"\n",
|
||||
@@ -18,7 +18,32 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install --upgrade multion > /dev/null"
|
||||
"!pip install --upgrade multion langchain -q"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents.agent_toolkits import MultionToolkit\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"toolkit = MultionToolkit()\n",
|
||||
"\n",
|
||||
"toolkit"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tools = toolkit.get_tools()\n",
|
||||
"tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -38,8 +63,9 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Authorize connection to your Browser extention\n",
|
||||
"import multion \n",
|
||||
"multion.login()\n"
|
||||
"import multion\n",
|
||||
"multion.login()\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -57,38 +83,18 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents.agent_toolkits import create_multion_agent\n",
|
||||
"from langchain.tools.multion.tool import MultionClientTool\n",
|
||||
"from langchain.agents.agent_types import AgentType\n",
|
||||
"from langchain.chat_models import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"agent_executor = create_multion_agent(\n",
|
||||
" llm=ChatOpenAI(temperature=0),\n",
|
||||
" tool=MultionClientTool(),\n",
|
||||
" agent_type=AgentType.OPENAI_FUNCTIONS,\n",
|
||||
" verbose=True\n",
|
||||
")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent.run(\"show me the weather today\")"
|
||||
"from langchain import OpenAI\n",
|
||||
"from langchain.agents import initialize_agent, AgentType\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"from langchain.agents.agent_toolkits import MultionToolkit\n",
|
||||
"toolkit = MultionToolkit()\n",
|
||||
"tools=toolkit.get_tools()\n",
|
||||
"agent = initialize_agent(\n",
|
||||
" tools=toolkit.get_tools(),\n",
|
||||
" llm=llm,\n",
|
||||
" agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,\n",
|
||||
" verbose = True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -100,7 +106,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent.run(\n",
|
||||
" \"Tweet about Elon Musk\"\n",
|
||||
" \"Tweet 'Hi from MultiOn'\"\n",
|
||||
")"
|
||||
]
|
||||
}
|
||||
|
||||
173
docs/extras/integrations/tools/nuclia.ipynb
Normal file
@@ -0,0 +1,173 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Nuclia Understanding API tool\n",
|
||||
"\n",
|
||||
"[Nuclia](https://nuclia.com) automatically indexes your unstructured data from any internal and external source, providing optimized search results and generative answers. It can handle video and audio transcription, image content extraction, and document parsing.\n",
|
||||
"\n",
|
||||
"The Nuclia Understanding API supports the processing of unstructured data, including text, web pages, documents, and audio/video contents. It extracts all texts wherever it is (using speech-to-text or OCR when needed), it identifies entities, it aslo extracts metadata, embedded files (like images in a PDF), and web links. It also provides a summary of the content.\n",
|
||||
"\n",
|
||||
"To use the Nuclia Understanding API, you need to have a Nuclia account. You can create one for free at [https://nuclia.cloud](https://nuclia.cloud), and then [create a NUA key](https://docs.nuclia.dev/docs/docs/using/understanding/intro)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#!pip install --upgrade protobuf\n",
|
||||
"#!pip install nucliadb-protos"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"NUCLIA_ZONE\"] = \"<YOUR_ZONE>\" # e.g. europe-1\n",
|
||||
"os.environ[\"NUCLIA_NUA_KEY\"] = \"<YOUR_API_KEY>\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.tools.nuclia import NucliaUnderstandingAPI\n",
|
||||
"\n",
|
||||
"nua = NucliaUnderstandingAPI(enable_ml=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can push files to the Nuclia Understanding API using the `push` action. As the processing is done asynchronously, the results might be returned in a different order than the files were pushed. That is why you need to provide an `id` to match the results with the corresponding file."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"nua.run({\"action\": \"push\", \"id\": \"1\", \"path\": \"./report.docx\"})\n",
|
||||
"nua.run({\"action\": \"push\", \"id\": \"2\", \"path\": \"./interview.mp4\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can now call the `pull` action in a loop until you get the JSON-formatted result."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import time\n",
|
||||
"\n",
|
||||
"pending = True\n",
|
||||
"data = None\n",
|
||||
"while pending:\n",
|
||||
" time.sleep(15)\n",
|
||||
" data = nua.run({\"action\": \"pull\", \"id\": \"1\", \"path\": None})\n",
|
||||
" if data:\n",
|
||||
" print(data)\n",
|
||||
" pending = False\n",
|
||||
" else:\n",
|
||||
" print(\"waiting...\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can also do it in one step in `async` mode, you only need to do a push, and it will wait until the results are pulled:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import asyncio\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"async def process():\n",
|
||||
" data = await nua.arun(\n",
|
||||
" {\"action\": \"push\", \"id\": \"1\", \"path\": \"./talk.mp4\", \"text\": None}\n",
|
||||
" )\n",
|
||||
" print(data)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"asyncio.run(process())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Retrieved information\n",
|
||||
"\n",
|
||||
"Nuclia returns the following information:\n",
|
||||
"\n",
|
||||
"- file metadata\n",
|
||||
"- extracted text\n",
|
||||
"- nested text (like text in an embedded image)\n",
|
||||
"- a summary (only when `enable_ml` is set to `True`)\n",
|
||||
"- paragraphs and sentences splitting (defined by the position of their first and last characters, plus start time and end time for a video or audio file)\n",
|
||||
"- named entities: people, dates, places, organizations, etc. (only when `enable_ml` is set to `True`)\n",
|
||||
"- links\n",
|
||||
"- a thumbnail\n",
|
||||
"- embedded files\n",
|
||||
"- the vector representations of the text (only when `enable_ml` is set to `True`)\n",
|
||||
"\n",
|
||||
"Note:\n",
|
||||
"\n",
|
||||
" Generated files (thumbnail, extracted embedded files, etc.) are provided as a token. You can download them with the [`/processing/download` endpoint](https://docs.nuclia.dev/docs/api#operation/Download_binary_file_processing_download_get).\n",
|
||||
"\n",
|
||||
" Also at any level, if an attribute exceeds a certain size, it will be put in a downloadable file and will be replaced in the document by a file pointer. This will consist of `{\"file\": {\"uri\": \"JWT_TOKEN\"}}`. The rule is that if the size of the message is greater than 1000000 characters, the biggest parts will be moved to downloadable files. First, the compression process will target vectors. If that is not enough, it will target large field metadata, and finally it will target extracted text.\n"
|
||||
]
|
||||
}
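A minimal download sketch with `requests`; the query-parameter name, header name, and base URL here are assumptions, so check the linked API reference for the exact contract:

```python
import os
import requests

zone = os.environ["NUCLIA_ZONE"]
nua_key = os.environ["NUCLIA_NUA_KEY"]

# The token would come from a file pointer in a processed document,
# e.g. {"file": {"uri": "JWT_TOKEN"}}.
response = requests.get(
    f"https://{zone}.nuclia.cloud/api/v1/processing/download",  # assumed base URL
    params={"token": "JWT_TOKEN"},                  # assumption: token as query parameter
    headers={"x-stf-nuakey": f"Bearer {nua_key}"},  # assumption: NUA key header name
)
with open("downloaded_file", "wb") as f:
    f.write(response.content)
```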
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "langchain",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.5"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -80,7 +80,7 @@
|
||||
"source": [
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
"\n",
|
||||
"loader = TextLoader(\"../../../state_of_the_union.txt\")\n",
|
||||
"loader = TextLoader(\"../../../extras/modules/state_of_the_union.txt\")\n",
|
||||
"documents = loader.load()\n",
|
||||
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
|
||||
"docs = text_splitter.split_documents(documents)\n",
|
||||
@@ -90,7 +90,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"execution_count": 3,
|
||||
"id": "5eabdb75",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -249,6 +249,50 @@
|
||||
"docs[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "30c8f57b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Serializing and De-Serializing to bytes\n",
|
||||
"\n",
|
||||
"you can pickle the FAISS Index by these functions. If you use embeddings model which is of 90 mb (sentence-transformers/all-MiniLM-L6-v2 or any other model), the resultant pickle size would be more than 90 mb. the size of the model is also included in the overall size. To overcome this, use the below functions. These functions only serializes FAISS index and size would be much lesser. this can be helpful if you wish to store the index in database like sql."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d8faead5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pkl = db.serialize_to_bytes() # serializes the faiss index"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "eb083247",
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "r"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"embeddings = HuggingFaceEmbeddings(model_name=\"all-MiniLM-L6-v2\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e36e220b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"db = FAISS.deserialize_from_bytes(embeddings = embeddings, serialized = pkl) # Load the index"
|
||||
]
|
||||
},
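Since `serialize_to_bytes` returns plain bytes, storing the index in a SQL database is straightforward. A minimal sketch using the standard-library `sqlite3` (the table name and schema are illustrative):

```python
import sqlite3

conn = sqlite3.connect("indexes.db")
conn.execute("CREATE TABLE IF NOT EXISTS faiss_indexes (name TEXT PRIMARY KEY, data BLOB)")
conn.execute("INSERT OR REPLACE INTO faiss_indexes VALUES (?, ?)", ("my_index", pkl))
conn.commit()

# Later: read the bytes back and rebuild the vector store.
(blob,) = conn.execute(
    "SELECT data FROM faiss_indexes WHERE name = ?", ("my_index",)
).fetchone()
db = FAISS.deserialize_from_bytes(embeddings=embeddings, serialized=blob)
```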
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "57da60d4",
|
||||
@@ -473,6 +517,67 @@
|
||||
"for doc in results:\n",
|
||||
" print(f\"Content: {doc.page_content}, Metadata: {doc.metadata}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1becca53",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Delete\n",
|
||||
"\n",
|
||||
"You can also delete ids. Note that the ids to delete should be the ids in the docstore."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "1408b870",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"True"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"db.delete([db.index_to_docstore_id[0]])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "d13daf33",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"False"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Is now missing\n",
|
||||
"0 in db.index_to_docstore_id"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "30ace43e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -491,7 +596,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.9"
|
||||
"version": "3.9.17"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -21,7 +21,10 @@
|
||||
"\n",
|
||||
"1. Leverage the `Rockset` console to create a [collection](https://rockset.com/docs/collections/) with the Write API as your source. In this walkthrough, we create a collection named `langchain_demo`. \n",
|
||||
" \n",
|
||||
" Configure the following [ingest transformation](https://rockset.com/docs/ingest-transformation/) to mark your embeddings field and take advantage of performance and storage optimizations:"
|
||||
" Configure the following [ingest transformation](https://rockset.com/docs/ingest-transformation/) to mark your embeddings field and take advantage of performance and storage optimizations:\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" (We used OpenAI `text-embedding-ada-002` for this examples, where #length_of_vector_embedding = 1536)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -75,23 +78,10 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": null,
|
||||
"id": "29505c1e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"ename": "InitializationException",
|
||||
"evalue": "The rockset client was initialized incorrectly: An api key must be provided as a parameter to the RocksetClient or the Configuration object.",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[0;31mInitializationException\u001b[0m Traceback (most recent call last)",
|
||||
"Cell \u001b[0;32mIn[5], line 6\u001b[0m\n\u001b[1;32m 4\u001b[0m ROCKSET_API_KEY \u001b[39m=\u001b[39m os\u001b[39m.\u001b[39menviron\u001b[39m.\u001b[39mget(\u001b[39m\"\u001b[39m\u001b[39mROCKSET_API_KEY\u001b[39m\u001b[39m\"\u001b[39m) \u001b[39m# Verify ROCKSET_API_KEY environment variable\u001b[39;00m\n\u001b[1;32m 5\u001b[0m ROCKSET_API_SERVER \u001b[39m=\u001b[39m rockset\u001b[39m.\u001b[39mRegions\u001b[39m.\u001b[39musw2a1 \u001b[39m# Verify Rockset region\u001b[39;00m\n\u001b[0;32m----> 6\u001b[0m rockset_client \u001b[39m=\u001b[39m rockset\u001b[39m.\u001b[39;49mRocksetClient(ROCKSET_API_SERVER, ROCKSET_API_KEY)\n\u001b[1;32m 8\u001b[0m COLLECTION_NAME\u001b[39m=\u001b[39m\u001b[39m'\u001b[39m\u001b[39mlangchain_demo\u001b[39m\u001b[39m'\u001b[39m\n\u001b[1;32m 9\u001b[0m TEXT_KEY\u001b[39m=\u001b[39m\u001b[39m'\u001b[39m\u001b[39mdescription\u001b[39m\u001b[39m'\u001b[39m\n",
|
||||
"File \u001b[0;32m~/Library/Python/3.9/lib/python/site-packages/rockset/rockset_client.py:242\u001b[0m, in \u001b[0;36mRocksetClient.__init__\u001b[0;34m(self, host, api_key, max_workers, config)\u001b[0m\n\u001b[1;32m 239\u001b[0m config\u001b[39m.\u001b[39mhost \u001b[39m=\u001b[39m host\n\u001b[1;32m 241\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m config\u001b[39m.\u001b[39mapi_key:\n\u001b[0;32m--> 242\u001b[0m \u001b[39mraise\u001b[39;00m InitializationException(\n\u001b[1;32m 243\u001b[0m \u001b[39m\"\u001b[39m\u001b[39mAn api key must be provided as a parameter to the RocksetClient or the Configuration object.\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m 244\u001b[0m )\n\u001b[1;32m 246\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mapi_client \u001b[39m=\u001b[39m ApiClient(config, max_workers\u001b[39m=\u001b[39mmax_workers)\n\u001b[1;32m 248\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mAliases \u001b[39m=\u001b[39m AliasesApiWrapper(\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mapi_client)\n",
|
||||
"\u001b[0;31mInitializationException\u001b[0m: The rockset client was initialized incorrectly: An api key must be provided as a parameter to the RocksetClient or the Configuration object."
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import rockset\n",
|
||||
@@ -118,18 +108,7 @@
|
||||
"execution_count": null,
|
||||
"id": "9740d8c4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"ename": "",
|
||||
"evalue": "",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[1;31mRunning cells with '/opt/local/bin/python3.11' requires the ipykernel package.\n",
|
||||
"\u001b[1;31mRun the following command to install 'ipykernel' into the Python environment. \n",
|
||||
"\u001b[1;31mCommand: '/opt/local/bin/python3.11 -m pip install ipykernel -U --user --force-reinstall'"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
@@ -155,20 +134,9 @@
|
||||
"execution_count": null,
|
||||
"id": "85b6a6c5",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"ename": "",
|
||||
"evalue": "",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[1;31mRunning cells with '/opt/local/bin/python3.11' requires the ipykernel package.\n",
|
||||
"\u001b[1;31mRun the following command to install 'ipykernel' into the Python environment. \n",
|
||||
"\u001b[1;31mCommand: '/opt/local/bin/python3.11 -m pip install ipykernel -U --user --force-reinstall'"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"embeddings = OpenAIEmbeddings() # Verify OPENAI_KEY environment variable\n",
|
||||
"embeddings = OpenAIEmbeddings() # Verify OPENAI_API_KEY environment variable\n",
|
||||
"\n",
|
||||
"docsearch = Rockset(\n",
|
||||
" client=rockset_client,\n",
|
||||
@@ -194,22 +162,10 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": null,
|
||||
"id": "0bbf3df0",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"ename": "NameError",
|
||||
"evalue": "name 'docsearch' is not defined",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
|
||||
"Cell \u001b[0;32mIn[1], line 2\u001b[0m\n\u001b[1;32m 1\u001b[0m query \u001b[39m=\u001b[39m \u001b[39m\"\u001b[39m\u001b[39mWhat did the president say about Ketanji Brown Jackson?\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[0;32m----> 2\u001b[0m output \u001b[39m=\u001b[39m docsearch\u001b[39m.\u001b[39msimilarity_search_with_relevance_scores(query, \u001b[39m4\u001b[39m, Rockset\u001b[39m.\u001b[39mDistanceFunction\u001b[39m.\u001b[39mCOSINE_SIM)\n\u001b[1;32m 4\u001b[0m \u001b[39mprint\u001b[39m(\u001b[39m\"\u001b[39m\u001b[39moutput length:\u001b[39m\u001b[39m\"\u001b[39m, \u001b[39mlen\u001b[39m(output))\n\u001b[1;32m 5\u001b[0m \u001b[39mfor\u001b[39;00m d, dist \u001b[39min\u001b[39;00m output:\n",
|
||||
"\u001b[0;31mNameError\u001b[0m: name 'docsearch' is not defined"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"output = docsearch.similarity_search_with_relevance_scores(\n",
|
||||
@@ -313,7 +269,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.6"
|
||||
"version": "3.10.12"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
190
docs/extras/integrations/vectorstores/scann.ipynb
Normal file
@@ -0,0 +1,190 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e4afbbb6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ScaNN\n",
|
||||
"\n",
|
||||
"ScaNN (Scalable Nearest Neighbors) is a method for efficient vector similarity search at scale.\n",
|
||||
"\n",
|
||||
"ScaNN includes search space pruning and quantization for Maximum Inner Product Search and also supports other distance functions such as Euclidean distance. The implementation is optimized for x86 processors with AVX2 support. See its [Google Research github](https://github.com/google-research/google-research/tree/master/scann) for more details."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "082f593e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Installation\n",
|
||||
"Install ScaNN through pip. Alternatively, you can follow instructions on the [ScaNN Website](https://github.com/google-research/google-research/tree/master/scann#building-from-source) to install from source."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a35e4f09",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install scann"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "44bf38a8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Retrieval Demo\n",
|
||||
"\n",
|
||||
"Below we show how to use ScaNN in conjunction with Huggingface Embeddings."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "377bc723",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Document(page_content='Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \\n\\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', metadata={'source': 'state_of_the_union.txt'})"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.embeddings import HuggingFaceEmbeddings\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain.vectorstores import ScaNN\n",
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
"\n",
|
||||
"loader = TextLoader(\"state_of_the_union.txt\")\n",
|
||||
"documents = loader.load()\n",
|
||||
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
|
||||
"docs = text_splitter.split_documents(documents)\n",
|
||||
"\n",
|
||||
"from langchain.embeddings import TensorflowHubEmbeddings\n",
|
||||
"embeddings = HuggingFaceEmbeddings()\n",
|
||||
"\n",
|
||||
"db = ScaNN.from_documents(docs, embeddings)\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"docs = db.similarity_search(query)\n",
|
||||
"\n",
|
||||
"docs[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9ad5b151",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## RetrievalQA Demo\n",
|
||||
"\n",
|
||||
"Next, we demonstrate using ScaNN in conjunction with Google PaLM API.\n",
|
||||
"\n",
|
||||
"You can obtain an API key from https://developers.generativeai.google/tutorials/setup"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "fc27ad51",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import RetrievalQA\n",
|
||||
"from langchain.chat_models import google_palm\n",
|
||||
"\n",
|
||||
"palm_client = google_palm.ChatGooglePalm(google_api_key='YOUR_GOOGLE_PALM_API_KEY')\n",
|
||||
"\n",
|
||||
"qa = RetrievalQA.from_chain_type(\n",
|
||||
" llm=palm_client,\n",
|
||||
" chain_type=\"stuff\",\n",
|
||||
" retriever=db.as_retriever(search_kwargs={'k': 10})\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "5b77f919",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"The president said that Ketanji Brown Jackson is one of our nation's top legal minds, who will continue Justice Breyer's legacy of excellence.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(qa.run('What did the president say about Ketanji Brown Jackson?'))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "0c6deec6",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"The president did not mention Michael Phelps in his speech.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(qa.run('What did the president say about Michael Phelps?'))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8a49f4a6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Save and loading local retrieval index"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"id": "6b7496b9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"db.save_local('/tmp/db', 'state_of_union')\n",
|
||||
"restored_db = ScaNN.load_local('/tmp/db', embeddings, index_name='state_of_union')"
|
||||
]
|
||||
}
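As a quick sanity check (a usage sketch, not part of the original notebook), the restored index should answer the same query as the original:

```python
docs = restored_db.similarity_search(query)
print(docs[0].page_content[:100])
```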
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.11"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -8,32 +8,55 @@
|
||||
"source": [
|
||||
"# Vectara\n",
|
||||
"\n",
|
||||
">[Vectara](https://vectara.com/) is a API platform for building LLM-powered applications. It provides a simple to use API for document indexing and query that is managed by Vectara and is optimized for performance and accuracy. \n",
|
||||
">[Vectara](https://vectara.com/) is a API platform for building GenAI applications. It provides an easy-to-use API for document indexing and query that is managed by Vectara and is optimized for performance and accuracy. \n",
|
||||
"See the [Vectara API documentation ](https://docs.vectara.com/docs/) for more information on how to use the API.\n",
|
||||
"\n",
|
||||
"This notebook shows how to use functionality related to the `Vectara`'s integration with langchain.\n",
|
||||
"Note that unlike many other integrations in this category, Vectara provides an end-to-end managed service for [Grounded Generation](https://vectara.com/grounded-generation/) (aka retrieval agumented generation), which includes:\n",
|
||||
"1. A way to extract text from document files and chunk them into sentences.\n",
|
||||
"2. Its own embeddings model and vector store - each text segment is encoded into a vector embedding and stored in the Vectara internal vector store\n",
|
||||
"3. A query service that automatically encodes the query into embedding, and retrieves the most relevant text segments (including support for [Hybrid Search](https://docs.vectara.com/docs/api-reference/search-apis/lexical-matching))\n",
|
||||
"\n",
|
||||
"This notebook shows how to use functionality related to the `Vectara` vector database or the `Vectara` retriever. \n",
|
||||
"\n",
|
||||
"See the [Vectara API documentation ](https://docs.vectara.com/docs/) for more information on how to use the API."
|
||||
"All of these are supported in this LangChain integration."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "aac9563e",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-04-04T10:51:22.282884Z",
|
||||
"start_time": "2023-04-04T10:51:21.408077Z"
|
||||
},
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"cell_type": "markdown",
|
||||
"id": "dc0f4344",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Setup\n",
|
||||
"\n",
|
||||
"You will need a Vectara account to use Vectara with LangChain. To get started, use the following steps:\n",
|
||||
"1. [Sign up](https://console.vectara.com/signup) for a Vectara account if you don't already have one. Once you have completed your sign up you will have a Vectara customer ID. You can find your customer ID by clicking on your name, on the top-right of the Vectara console window.\n",
|
||||
"2. Within your account you can create one or more corpora. Each corpus represents an area that stores text data upon ingest from input documents. To create a corpus, use the **\"Create Corpus\"** button. You then provide a name to your corpus as well as a description. Optionally you can define filtering attributes and apply some advanced options. If you click on your created corpus, you can see its name and corpus ID right on the top.\n",
|
||||
"3. Next you'll need to create API keys to access the corpus. Click on the **\"Authorization\"** tab in the corpus view and then the **\"Create API Key\"** button. Give your key a name, and choose whether you want query only or query+index for your key. Click \"Create\" and you now have an active API key. Keep this key confidential. \n",
|
||||
"\n",
|
||||
"To use LangChain with Vectara, you'll need to have these three values: customer ID, corpus ID and api_key.\n",
|
||||
"You can provide those to LangChain in two ways:\n",
|
||||
"\n",
|
||||
"1. Include in your environment these three variables: `VECTARA_CUSTOMER_ID`, `VECTARA_CORPUS_ID` and `VECTARA_API_KEY`.\n",
|
||||
"\n",
|
||||
"> For example, you can set these variables using os.environ and getpass as follows:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"import os\n",
|
||||
"from langchain.embeddings import FakeEmbeddings\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain.vectorstores import Vectara\n",
|
||||
"from langchain.document_loaders import TextLoader"
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"os.environ[\"VECTARA_CUSTOMER_ID\"] = getpass.getpass(\"Vectara Customer ID:\")\n",
|
||||
"os.environ[\"VECTARA_CORPUS_ID\"] = getpass.getpass(\"Vectara Corpus ID:\")\n",
|
||||
"os.environ[\"VECTARA_API_KEY\"] = getpass.getpass(\"Vectara API Key:\")\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"2. Add them to the Vectara vectorstore constructor:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"vectorstore = Vectara(\n",
|
||||
" vectara_customer_id=vectara_customer_id,\n",
|
||||
" vectara_corpus_id=vectara_corpus_id,\n",
|
||||
" vectara_api_key=vectara_api_key\n",
|
||||
" )\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -44,8 +67,21 @@
|
||||
"source": [
|
||||
"## Connecting to Vectara from LangChain\n",
|
||||
"\n",
|
||||
"The Vectara API provides simple API endpoints for indexing and querying, which is encapsulated in the Vectara integration.\n",
|
||||
"First let's ingest the documents using the from_documents() method:"
|
||||
"To get started, let's ingest the documents using the from_documents() method.\n",
|
||||
"We assume here that you've added your VECTARA_CUSTOMER_ID, VECTARA_CORPUS_ID and query+indexing VECTARA_API_KEY as environment variables."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "04a1f1a0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings import FakeEmbeddings\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain.vectorstores import Vectara\n",
|
||||
"from langchain.document_loaders import TextLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -88,7 +124,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Vectara's indexing API provides a file upload API where the file is handled directly by Vectara - pre-processed, chunked optimally and added to the Vectara vector store.\n",
|
||||
"To use this, we added the add_files() method (and from_files()). \n",
|
||||
"To use this, we added the add_files() method (as well as from_files()). \n",
|
||||
"\n",
|
||||
"Let's see this in action. We pick two PDF documents to upload: \n",
|
||||
"1. The \"I have a dream\" speech by Dr. King\n",
|
||||
@@ -296,7 +332,7 @@
|
||||
"source": [
|
||||
"## Vectara as a Retriever\n",
|
||||
"\n",
|
||||
"Vectara, as all the other vector stores, can be used also as a LangChain Retriever:"
|
||||
"Vectara, as all the other LangChain vectorstores, is most often used as a LangChain Retriever:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -378,7 +414,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.9"
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
240
docs/extras/integrations/vectorstores/xata.ipynb
Normal file
240
docs/extras/integrations/vectorstores/xata.ipynb
Normal file
@@ -0,0 +1,240 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Xata\n",
|
||||
"\n",
|
||||
"> [Xata](https://xata.io) is a serverless data platform, based on PostgreSQL. It provides a Python SDK for interacting with your database, and a UI for managing your data.\n",
|
||||
"> Xata has a native vector type, which can be added to any table, and supports similarity search. LangChain inserts vectors directly to Xata, and queries it for the nearest neighbors of a given vector, so that you can use all the LangChain Embeddings integrations with Xata."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This notebook guides you how to use Xata as a VectorStore."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"### Create a database to use as a vector store\n",
|
||||
"\n",
|
||||
"In the [Xata UI](https://app.xata.io) create a new database. You can name it whatever you want, in this notepad we'll use `langchain`.\n",
|
||||
"Create a table, again you can name it anything, but we will use `vectors`. Add the following columns via the UI:\n",
|
||||
"\n",
|
||||
"* `content` of type \"Text\". This is used to store the `Document.pageContent` values.\n",
|
||||
"* `embedding` of type \"Vector\". Use the dimension used by the model you plan to use. In this notebook we use OpenAI embeddings, which have 1536 dimensions.\n",
|
||||
"* `search` of type \"Text\". This is used as a metadata column by this example.\n",
|
||||
"* any other columns you want to use as metadata. They are populated from the `Document.metadata` object. For example, if in the `Document.metadata` object you have a `title` property, you can create a `title` column in the table and it will be populated.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's first install our dependencies:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install xata==1.0.0a7 openai tiktoken langchain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's load the OpenAI key to the environemnt. If you don't have one you can create an OpenAI account and create a key on this [page](https://platform.openai.com/account/api-keys)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Similarly, we need to get the environment variables for Xata. You can create a new API key by visiting your [account settings](https://app.xata.io/settings). To find the database URL, go to the Settings page of the database that you have created. The database URL should look something like this: `https://demo-uni3q8.eu-west-1.xata.sh/db/langchain`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"api_key = getpass.getpass(\"Xata API key: \")\n",
|
||||
"db_url = input(\"Xata database URL (copy it from your DB settings):\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
"from langchain.vectorstores.xata import XataVectorStore\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Create the Xata vector store\n",
|
||||
"Let's import our test dataset:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = TextLoader(\"../../../state_of_the_union.txt\")\n",
|
||||
"documents = loader.load()\n",
|
||||
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
|
||||
"docs = text_splitter.split_documents(documents)\n",
|
||||
"\n",
|
||||
"embeddings = OpenAIEmbeddings()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now create the actual vector store, backed by the Xata table."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"vector_store = XataVectorStore.from_documents(docs, embeddings, api_key=api_key, db_url=db_url, table_name=\"vectors\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"After running the above command, if you go to the Xata UI, you should see the documents loaded together with their embeddings."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Similarity Search"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"found_docs = vector_store.similarity_search(query)\n",
|
||||
"print(found_docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Similarity Search with score (vector distance)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"result = vector_store.similarity_search_with_score(query)\n",
|
||||
"for doc, score in result:\n",
|
||||
" print(f\"document={doc}, score={score}\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -109,11 +109,11 @@
|
||||
"source": [
|
||||
"# Reorder the documents:\n",
|
||||
"# Less relevant document will be at the middle of the list and more\n",
|
||||
"# relevant elements at begining / end.\n",
|
||||
"# relevant elements at beginning / end.\n",
|
||||
"reordering = LongContextReorder()\n",
|
||||
"reordered_docs = reordering.transform_documents(docs)\n",
|
||||
"\n",
|
||||
"# Confirm that the 4 relevant documents are at begining and end.\n",
|
||||
"# Confirm that the 4 relevant documents are at beginning and end.\n",
|
||||
"reordered_docs"
|
||||
]
|
||||
},
|
||||
|
||||
727
docs/extras/use_cases/chatbots.ipynb
Normal file
727
docs/extras/use_cases/chatbots.ipynb
Normal file
@@ -0,0 +1,727 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ee7f95e4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Chatbots\n",
|
||||
"\n",
|
||||
"[](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/extras/use_cases/chatbots/chatbots.ipynb)\n",
|
||||
"\n",
|
||||
"## Use case\n",
|
||||
"\n",
|
||||
"Chatbots are one of the central LLM use-cases. The core features of chatbots are that they can have long-running conversations and have access to information that users want to know about.\n",
|
||||
"\n",
|
||||
"Aside from basic prompting and LLMs, memory and retrieval are the core components of a chatbot. Memory allows a chatbot to remember past interactions, and retrieval provides a chatbot with up-to-date, domain-specific information."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "56615b45",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ff48f490",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"The chat model interface is based around messages rather than raw text. Several components are important to consider for chat:\n",
|
||||
"\n",
|
||||
"* `chat model`: See [here](/docs/integrations/chat) for a list of chat model integrations and [here](/docs/modules/model_io/models/chat) for documentation on the chat model interface in LangChain. You can use `LLMs` (see [here](/docs/modules/model_io/models/llms)) for chatbots as well, but chat models have a more conversational tone and natively support a message interface.\n",
|
||||
"* `prompt template`: Prompt templates make it easy to assemble prompts that combine default messages, user input, chat history, and (optionally) additional retrieved context.\n",
|
||||
"* `memory`: [See here](/docs/modules/memory/) for in-depth documentation on memory types\n",
|
||||
"* `retriever` (optional): [See here](/docs/modules/data_connection/retrievers) for in-depth documentation on retrieval systems. These are useful if you want to build a chatbot with domain-specific knowledge.\n",
|
||||
"\n",
|
||||
"## Quickstart\n",
|
||||
"\n",
|
||||
"Here's a quick preview of how we can create chatbot interfaces. First let's install some dependencies and set the required credentials:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "5070a1fd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install langchain openai \n",
|
||||
"\n",
|
||||
"# Set env var OPENAI_API_KEY or load from a .env file:\n",
|
||||
"# import dotenv\n",
|
||||
"# dotenv.load_env()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "88197b95",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"With a plain chat model, we can get chat completions by [passing one or more messages](/docs/modules/model_io/models/chat) to the model.\n",
|
||||
"\n",
|
||||
"The chat model will respond with a message."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "5b0d84ae",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"J'adore la programmation.\", additional_kwargs={}, example=False)"
|
||||
]
|
||||
},
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.schema import (\n",
|
||||
" AIMessage,\n",
|
||||
" HumanMessage,\n",
|
||||
" SystemMessage\n",
|
||||
")\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"\n",
|
||||
"chat = ChatOpenAI()\n",
|
||||
"chat([HumanMessage(content=\"Translate this sentence from English to French: I love programming.\")])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7935d9a5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And if we pass in a list of messages:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "afd27a9f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"J'adore la programmation.\", additional_kwargs={}, example=False)"
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" SystemMessage(content=\"You are a helpful assistant that translates English to French.\"),\n",
|
||||
" HumanMessage(content=\"I love programming.\")\n",
|
||||
"]\n",
|
||||
"chat(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c7a1d169",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can then wrap our chat model in a `ConversationChain`, which has built-in memory for remembering past user inputs and model outputs."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"id": "fdb05d74",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Je adore la programmation.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 20,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chains import ConversationChain \n",
|
||||
" \n",
|
||||
"conversation = ConversationChain(llm=chat) \n",
|
||||
"conversation.run(\"Translate this sentence from English to French: I love programming.\") "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"id": "d801a173",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Ich liebe Programmieren.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 21,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation.run(\"Translate it to German.\") "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9e86788c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Memory \n",
|
||||
"\n",
|
||||
"As we mentioned above, the core component of chatbots is the memory system. One of the simplest and most commonly used forms of memory is `ConversationBufferMemory`:\n",
|
||||
"* This memory allows for storing of messages in a `buffer`\n",
|
||||
"* When called in a chain, it returns all of the messages it has stored\n",
|
||||
"\n",
|
||||
"LangChain comes with many other types of memory, too. [See here](/docs/modules/memory/) for in-depth documentation on memory types.\n",
|
||||
"\n",
|
||||
"For now let's take a quick look at ConversationBufferMemory. We can manually add a few chat messages to the memory like so:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "1380a4ea",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.memory import ConversationBufferMemory\n",
|
||||
"\n",
|
||||
"memory = ConversationBufferMemory()\n",
|
||||
"memory.chat_memory.add_user_message(\"hi!\")\n",
|
||||
"memory.chat_memory.add_ai_message(\"whats up?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a3d5d1f8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And now we can load from our memory. The key method exposed by all `Memory` classes is `load_memory_variables`. This takes in any initial chain input and returns a list of memory variables which are added to the chain input. \n",
|
||||
"\n",
|
||||
"Since this simple memory type doesn't actually take into account the chain input when loading memory, we can pass in an empty input for now:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "982467e7",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'history': 'Human: hi!\\nAI: whats up?'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"memory.load_memory_variables({})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7c1b20d4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also keep a sliding window of the most recent `k` interactions using `ConversationBufferWindowMemory`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "f72b9ff7",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'history': 'Human: not much you\\nAI: not much'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.memory import ConversationBufferWindowMemory\n",
|
||||
"\n",
|
||||
"memory = ConversationBufferWindowMemory(k=1)\n",
|
||||
"memory.save_context({\"input\": \"hi\"}, {\"output\": \"whats up\"})\n",
|
||||
"memory.save_context({\"input\": \"not much you\"}, {\"output\": \"not much\"})\n",
|
||||
"memory.load_memory_variables({})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7b84f90a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"`ConversationSummaryMemory` is an extension of this theme.\n",
|
||||
"\n",
|
||||
"It creates a summary of the conversation over time. \n",
|
||||
"\n",
|
||||
"This memory is most useful for longer conversations where the full message history would consume many tokens."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"id": "ca2596ed",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.memory import ConversationSummaryMemory\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"memory = ConversationSummaryMemory(llm=llm)\n",
|
||||
"memory.save_context({\"input\": \"hi\"},{\"output\": \"whats up\"})\n",
|
||||
"memory.save_context({\"input\": \"im working on better docs for chatbots\"},{\"output\": \"oh, that sounds like a lot of work\"})\n",
|
||||
"memory.save_context({\"input\": \"yes, but it's worth the effort\"},{\"output\": \"agreed, good docs are important!\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "060f69b7",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'history': '\\nThe human greets the AI, to which the AI responds. The human then mentions they are working on better docs for chatbots, to which the AI responds that it sounds like a lot of work. The human agrees that it is worth the effort, and the AI agrees that good docs are important.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"memory.load_memory_variables({})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4bf036f6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"`ConversationSummaryBufferMemory` extends this a bit further:\n",
|
||||
"\n",
|
||||
"It uses token length rather than number of interactions to determine when to flush interactions."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "38b42728",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.memory import ConversationSummaryBufferMemory\n",
|
||||
"memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=10)\n",
|
||||
"memory.save_context({\"input\": \"hi\"}, {\"output\": \"whats up\"})\n",
|
||||
"memory.save_context({\"input\": \"not much you\"}, {\"output\": \"not much\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ff0db09f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Conversation \n",
|
||||
"\n",
|
||||
"We can unpack what goes under the hood with `ConversationChain`. \n",
|
||||
"\n",
|
||||
"We can specify our memory, `ConversationSummaryMemory` and we can specify the prompt. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"id": "fccd6995",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mSystem: You are a nice chatbot having a conversation with a human.\n",
|
||||
"Human: hi\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'hi',\n",
|
||||
" 'chat_history': [HumanMessage(content='hi', additional_kwargs={}, example=False),\n",
|
||||
" AIMessage(content='Hello! How can I assist you today?', additional_kwargs={}, example=False)],\n",
|
||||
" 'text': 'Hello! How can I assist you today?'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.prompts import (\n",
|
||||
" ChatPromptTemplate,\n",
|
||||
" MessagesPlaceholder,\n",
|
||||
" SystemMessagePromptTemplate,\n",
|
||||
" HumanMessagePromptTemplate,\n",
|
||||
")\n",
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"\n",
|
||||
"# LLM\n",
|
||||
"llm = ChatOpenAI()\n",
|
||||
"\n",
|
||||
"# Prompt \n",
|
||||
"prompt = ChatPromptTemplate(\n",
|
||||
" messages=[\n",
|
||||
" SystemMessagePromptTemplate.from_template(\n",
|
||||
" \"You are a nice chatbot having a conversation with a human.\"\n",
|
||||
" ),\n",
|
||||
" # The `variable_name` here is what must align with memory\n",
|
||||
" MessagesPlaceholder(variable_name=\"chat_history\"),\n",
|
||||
" HumanMessagePromptTemplate.from_template(\"{question}\")\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Notice that we `return_messages=True` to fit into the MessagesPlaceholder\n",
|
||||
"# Notice that `\"chat_history\"` aligns with the MessagesPlaceholder name\n",
|
||||
"memory = ConversationBufferMemory(memory_key=\"chat_history\",return_messages=True)\n",
|
||||
"conversation = LLMChain(\n",
|
||||
" llm=llm,\n",
|
||||
" prompt=prompt,\n",
|
||||
" verbose=True,\n",
|
||||
" memory=memory\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Notice that we just pass in the `question` variables - `chat_history` gets populated by memory\n",
|
||||
"conversation({\"question\": \"hi\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 25,
|
||||
"id": "eb0cadfd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mSystem: You are a nice chatbot having a conversation with a human.\n",
|
||||
"Human: hi\n",
|
||||
"AI: Hello! How can I assist you today?\n",
|
||||
"Human: Translate this sentence from English to French: I love programming.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'Translate this sentence from English to French: I love programming.',\n",
|
||||
" 'chat_history': [HumanMessage(content='hi', additional_kwargs={}, example=False),\n",
|
||||
" AIMessage(content='Hello! How can I assist you today?', additional_kwargs={}, example=False),\n",
|
||||
" HumanMessage(content='Translate this sentence from English to French: I love programming.', additional_kwargs={}, example=False),\n",
|
||||
" AIMessage(content='Sure! The translation of \"I love programming\" from English to French is \"J\\'adore programmer.\"', additional_kwargs={}, example=False)],\n",
|
||||
" 'text': 'Sure! The translation of \"I love programming\" from English to French is \"J\\'adore programmer.\"'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 25,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation({\"question\": \"Translate this sentence from English to French: I love programming.\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"id": "c56d6219",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mSystem: You are a nice chatbot having a conversation with a human.\n",
|
||||
"Human: hi\n",
|
||||
"AI: Hello! How can I assist you today?\n",
|
||||
"Human: Translate this sentence from English to French: I love programming.\n",
|
||||
"AI: Sure! The translation of \"I love programming\" from English to French is \"J'adore programmer.\"\n",
|
||||
"Human: Now translate the sentence to German.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'Now translate the sentence to German.',\n",
|
||||
" 'chat_history': [HumanMessage(content='hi', additional_kwargs={}, example=False),\n",
|
||||
" AIMessage(content='Hello! How can I assist you today?', additional_kwargs={}, example=False),\n",
|
||||
" HumanMessage(content='Translate this sentence from English to French: I love programming.', additional_kwargs={}, example=False),\n",
|
||||
" AIMessage(content='Sure! The translation of \"I love programming\" from English to French is \"J\\'adore programmer.\"', additional_kwargs={}, example=False),\n",
|
||||
" HumanMessage(content='Now translate the sentence to German.', additional_kwargs={}, example=False),\n",
|
||||
" AIMessage(content='Certainly! The translation of \"I love programming\" from English to German is \"Ich liebe das Programmieren.\"', additional_kwargs={}, example=False)],\n",
|
||||
" 'text': 'Certainly! The translation of \"I love programming\" from English to German is \"Ich liebe das Programmieren.\"'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 26,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation({\"question\": \"Now translate the sentence to German.\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "43858489",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can see the chat history preserved in the prompt using the [LangSmith trace](https://smith.langchain.com/public/dce34c57-21ca-4283-9020-a8e0d78a59de/r).\n",
|
||||
"\n",
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3f35cc16",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chat Retrieval\n",
|
||||
"\n",
|
||||
"Now, suppose we want to [chat with documents](https://twitter.com/mayowaoshin/status/1640385062708424708?s=20) or some other source of knowledge.\n",
|
||||
"\n",
|
||||
"This is popular use case, combining chat with [document retrieval](/docs/use_cases/question_answering).\n",
|
||||
"\n",
|
||||
"It allows us to chat with specific information that the model was not trained on."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1a01e7b5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install tiktoken chromadb"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "88e220de",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Load a blog post."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 31,
|
||||
"id": "1b99b36c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import WebBaseLoader\n",
|
||||
"\n",
|
||||
"loader = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")\n",
|
||||
"data = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3662ce79",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Split and store this in a vector."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 32,
|
||||
"id": "058f1541",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
|
||||
"\n",
|
||||
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)\n",
|
||||
"all_splits = text_splitter.split_documents(data)\n",
|
||||
"\n",
|
||||
"from langchain.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain.vectorstores import Chroma\n",
|
||||
"\n",
|
||||
"vectorstore = Chroma.from_documents(documents=all_splits, embedding=OpenAIEmbeddings())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "603d9441",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Create our memory, as before, but's let's use `ConversationSummaryMemory`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 37,
|
||||
"id": "f89fd3f5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"memory = ConversationSummaryMemory(llm=llm,memory_key=\"chat_history\",return_messages=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 38,
|
||||
"id": "28503423",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.chains import ConversationalRetrievalChain\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI()\n",
|
||||
"retriever = vectorstore.as_retriever()\n",
|
||||
"qa = ConversationalRetrievalChain.from_llm(llm, retriever=retriever, memory=memory)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 39,
|
||||
"id": "a9c3bd5e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'How do agents use Task decomposition?',\n",
|
||||
" 'chat_history': [SystemMessage(content='', additional_kwargs={})],\n",
|
||||
" 'answer': 'Agents can use task decomposition in several ways:\\n\\n1. Simple prompting: Agents can use Language Model based prompting to break down tasks into subgoals. For example, by providing prompts like \"Steps for XYZ\" or \"What are the subgoals for achieving XYZ?\", the agent can generate a sequence of smaller steps that lead to the completion of the overall task.\\n\\n2. Task-specific instructions: Agents can be given task-specific instructions to guide their planning process. For example, if the task is to write a novel, the agent can be instructed to \"Write a story outline.\" This provides a high-level structure for the task and helps in breaking it down into smaller components.\\n\\n3. Human inputs: Agents can also take inputs from humans to decompose tasks. This can be done through direct communication or by leveraging human expertise. Humans can provide guidance and insights to help the agent break down complex tasks into manageable subgoals.\\n\\nOverall, task decomposition allows agents to break down large tasks into smaller, more manageable subgoals, enabling them to plan and execute complex tasks efficiently.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 39,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"qa(\"How do agents use Task decomposition?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 40,
|
||||
"id": "a29a7713",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'What are the various ways to implemet memory to support it?',\n",
|
||||
" 'chat_history': [SystemMessage(content='The human asks how agents use task decomposition. The AI explains that agents can use task decomposition in several ways, including simple prompting, task-specific instructions, and human inputs. Task decomposition allows agents to break down large tasks into smaller, more manageable subgoals, enabling them to plan and execute complex tasks efficiently.', additional_kwargs={})],\n",
|
||||
" 'answer': 'There are several ways to implement memory to support task decomposition:\\n\\n1. Long-Term Memory Management: This involves storing and organizing information in a long-term memory system. The agent can retrieve past experiences, knowledge, and learned strategies to guide the task decomposition process.\\n\\n2. Internet Access: The agent can use internet access to search for relevant information and gather resources to aid in task decomposition. This allows the agent to access a vast amount of information and utilize it in the decomposition process.\\n\\n3. GPT-3.5 Powered Agents: The agent can delegate simple tasks to GPT-3.5 powered agents. These agents can perform specific tasks or provide assistance in task decomposition, allowing the main agent to focus on higher-level planning and decision-making.\\n\\n4. File Output: The agent can store the results of task decomposition in files or documents. This allows for easy retrieval and reference during the execution of the task.\\n\\nThese memory resources help the agent in organizing and managing information, making informed decisions, and effectively decomposing complex tasks into smaller, manageable subgoals.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 40,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"qa(\"What are the various ways to implemet memory to support it?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d5e8d5f4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Again, we can use the [LangSmith trace](https://smith.langchain.com/public/18460363-0c70-4c72-81c7-3b57253bb58c/r) to explore the prompt structure.\n",
|
||||
"\n",
|
||||
"### Going deeper \n",
|
||||
"\n",
|
||||
"* Agents, such as the [conversational retrieval agent](/docs/use_cases/question_answering/how_to/conversational_retrieval_agents), can be used for retrieval when necessary while also holding a conversation.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1ff8925f-4c21-4680-a9cd-3670ad4852b3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,20 +0,0 @@
|
||||
---
|
||||
sidebar_position: 4
|
||||
---
|
||||
|
||||
# Chatbots
|
||||
|
||||
Since language models are good at producing text, that makes them ideal for creating chatbots.
|
||||
Aside from the base prompts/LLMs, an important concept to know for Chatbots is `memory`.
|
||||
Most chat based applications rely on remembering what happened in previous interactions, which `memory` is designed to help with.
|
||||
|
||||
The following resources exist:
|
||||
- [ChatGPT Clone](/docs/modules/agents/how_to/chatgpt_clone.html): A notebook walking through how to recreate a ChatGPT-like experience with LangChain.
|
||||
- [Conversation Agent](/docs/modules/agents/agent_types/chat_conversation_agent.html): A notebook walking through how to create an agent optimized for conversation.
|
||||
|
||||
|
||||
Additional related resources include:
|
||||
- [Memory concepts and examples](/docs/modules/memory/): Explanation of key concepts related to memory along with how-to's and examples.
|
||||
|
||||
More end-to-end examples include:
|
||||
- [Voice Assistant](./voice_assistant.html): A notebook walking through how to create a voice assistant using LangChain.
|
||||
@@ -1,482 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Voice Assistant\n",
|
||||
"\n",
|
||||
"This chain creates a clone of ChatGPT with a few modifications to make it a voice assistant. \n",
|
||||
"It uses the `pyttsx3` and `speech_recognition` libraries to convert text to speech and speech to text respectively. The prompt template is also changed to make it more suitable for voice assistant use."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain import OpenAI, LLMChain, PromptTemplate\n",
|
||||
"from langchain.memory import ConversationBufferWindowMemory\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"template = \"\"\"Assistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.\n",
|
||||
"\n",
|
||||
"{history}\n",
|
||||
"Human: {human_input}\n",
|
||||
"Assistant:\"\"\"\n",
|
||||
"\n",
|
||||
"prompt = PromptTemplate(input_variables=[\"history\", \"human_input\"], template=template)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"chatgpt_chain = LLMChain(\n",
|
||||
" llm=OpenAI(temperature=0),\n",
|
||||
" prompt=prompt,\n",
|
||||
" verbose=True,\n",
|
||||
" memory=ConversationBufferWindowMemory(k=2),\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import speech_recognition as sr\n",
|
||||
"import pyttsx3\n",
|
||||
"\n",
|
||||
"engine = pyttsx3.init()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def listen():\n",
|
||||
" r = sr.Recognizer()\n",
|
||||
" with sr.Microphone() as source:\n",
|
||||
" print(\"Calibrating...\")\n",
|
||||
" r.adjust_for_ambient_noise(source, duration=5)\n",
|
||||
" # optional parameters to adjust microphone sensitivity\n",
|
||||
" # r.energy_threshold = 200\n",
|
||||
" # r.pause_threshold=0.5\n",
|
||||
"\n",
|
||||
" print(\"Okay, go!\")\n",
|
||||
" while 1:\n",
|
||||
" text = \"\"\n",
|
||||
" print(\"listening now...\")\n",
|
||||
" try:\n",
|
||||
" audio = r.listen(source, timeout=5, phrase_time_limit=30)\n",
|
||||
" print(\"Recognizing...\")\n",
|
||||
" # whisper model options are found here: https://github.com/openai/whisper#available-models-and-languages\n",
|
||||
" # other speech recognition models are also available.\n",
|
||||
" text = r.recognize_whisper(\n",
|
||||
" audio,\n",
|
||||
" model=\"medium.en\",\n",
|
||||
" show_dict=True,\n",
|
||||
" )[\"text\"]\n",
|
||||
" except Exception as e:\n",
|
||||
" unrecognized_speech_text = (\n",
|
||||
" f\"Sorry, I didn't catch that. Exception was: {e}s\"\n",
|
||||
" )\n",
|
||||
" text = unrecognized_speech_text\n",
|
||||
" print(text)\n",
|
||||
"\n",
|
||||
" response_text = chatgpt_chain.predict(human_input=text)\n",
|
||||
" print(response_text)\n",
|
||||
" engine.say(response_text)\n",
|
||||
" engine.runAndWait()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Calibrating...\n",
|
||||
"Okay, go!\n",
|
||||
"listening now...\n",
|
||||
"Recognizing...\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"C:\\Users\\jaden\\AppData\\Roaming\\Python\\Python310\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
|
||||
" from .autonotebook import tqdm as notebook_tqdm\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
" Hello, Assistant. What's going on?\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Human: Hello, Assistant. What's going on?\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
" Hi there! It's great to hear from you. I'm doing well. How can I help you today?\n",
|
||||
"listening now...\n",
|
||||
"Recognizing...\n",
|
||||
" That's cool. Isn't that neat? Yeah, I'm doing great.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.\n",
|
||||
"\n",
|
||||
"Human: Hello, Assistant. What's going on?\n",
|
||||
"AI: Hi there! It's great to hear from you. I'm doing well. How can I help you today?\n",
|
||||
"Human: That's cool. Isn't that neat? Yeah, I'm doing great.\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
" That's great to hear! What can I do for you today?\n",
|
||||
"listening now...\n",
|
||||
"Recognizing...\n",
|
||||
" Thank you.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.\n",
|
||||
"\n",
|
||||
"Human: Hello, Assistant. What's going on?\n",
|
||||
"AI: Hi there! It's great to hear from you. I'm doing well. How can I help you today?\n",
|
||||
"Human: That's cool. Isn't that neat? Yeah, I'm doing great.\n",
|
||||
"AI: That's great to hear! What can I do for you today?\n",
|
||||
"Human: Thank you.\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
" You're welcome! Is there anything else I can help you with?\n",
|
||||
"listening now...\n",
|
||||
"Recognizing...\n",
|
||||
" I'd like to learn more about neural networks.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.\n",
|
||||
"\n",
|
||||
"Human: That's cool. Isn't that neat? Yeah, I'm doing great.\n",
|
||||
"AI: That's great to hear! What can I do for you today?\n",
|
||||
"Human: Thank you.\n",
|
||||
"AI: You're welcome! Is there anything else I can help you with?\n",
|
||||
"Human: I'd like to learn more about neural networks.\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
" Sure! Neural networks are a type of artificial intelligence that use a network of interconnected nodes to process data and make decisions. They are used in a variety of applications, from image recognition to natural language processing. Neural networks are often used to solve complex problems that are too difficult for traditional algorithms.\n",
|
||||
"listening now...\n",
|
||||
"Recognizing...\n",
|
||||
" Tell me a fun fact about neural networks.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.\n",
|
||||
"\n",
|
||||
"Human: Thank you.\n",
|
||||
"AI: You're welcome! Is there anything else I can help you with?\n",
|
||||
"Human: I'd like to learn more about neural networks.\n",
|
||||
"AI: Sure! Neural networks are a type of artificial intelligence that use a network of interconnected nodes to process data and make decisions. They are used in a variety of applications, from image recognition to natural language processing. Neural networks are often used to solve complex problems that are too difficult for traditional algorithms.\n",
|
||||
"Human: Tell me a fun fact about neural networks.\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
" Neural networks are inspired by the way the human brain works. They are composed of interconnected nodes that process data and make decisions, just like neurons in the brain. Neural networks can learn from their mistakes and improve their performance over time, just like humans do.\n",
|
||||
"listening now...\n",
|
||||
"Recognizing...\n",
|
||||
" Tell me about a brand new discovered bird species.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.\n",
|
||||
"\n",
|
||||
"Human: I'd like to learn more about neural networks.\n",
|
||||
"AI: Sure! Neural networks are a type of artificial intelligence that use a network of interconnected nodes to process data and make decisions. They are used in a variety of applications, from image recognition to natural language processing. Neural networks are often used to solve complex problems that are too difficult for traditional algorithms.\n",
|
||||
"Human: Tell me a fun fact about neural networks.\n",
|
||||
"AI: Neural networks are inspired by the way the human brain works. They are composed of interconnected nodes that process data and make decisions, just like neurons in the brain. Neural networks can learn from their mistakes and improve their performance over time, just like humans do.\n",
|
||||
"Human: Tell me about a brand new discovered bird species.\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
" A new species of bird was recently discovered in the Amazon rainforest. The species, called the Spix's Macaw, is a small, blue parrot that is believed to be extinct in the wild. It is the first new species of bird to be discovered in the Amazon in over 100 years.\n",
|
||||
"listening now...\n",
|
||||
"Recognizing...\n",
|
||||
" Tell me a children's story about the importance of honesty and trust.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.\n",
|
||||
"\n",
|
||||
"Human: Tell me a fun fact about neural networks.\n",
|
||||
"AI: Neural networks are inspired by the way the human brain works. They are composed of interconnected nodes that process data and make decisions, just like neurons in the brain. Neural networks can learn from their mistakes and improve their performance over time, just like humans do.\n",
|
||||
"Human: Tell me about a brand new discovered bird species.\n",
|
||||
"AI: A new species of bird was recently discovered in the Amazon rainforest. The species, called the Spix's Macaw, is a small, blue parrot that is believed to be extinct in the wild. It is the first new species of bird to be discovered in the Amazon in over 100 years.\n",
|
||||
"Human: Tell me a children's story about the importance of honesty and trust.\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
" Once upon a time, there was a young boy named Jack who lived in a small village. Jack was always honest and trustworthy, and his friends and family knew they could always count on him. One day, Jack was walking through the forest when he stumbled upon a magical tree. The tree told Jack that if he was honest and trustworthy, he would be rewarded with a special gift. Jack was so excited, and he promised to always be honest and trustworthy. Sure enough, the tree rewarded Jack with a beautiful golden apple. From that day forward, Jack was always honest and trustworthy, and he was rewarded with many more magical gifts. The moral of the story is that honesty and trust are the most important things in life.\n",
|
||||
"listening now...\n",
|
||||
"Recognizing...\n",
|
||||
" Wow, Assistant, that was a really good story. Congratulations!\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.\n",
|
||||
"\n",
|
||||
"Human: Tell me about a brand new discovered bird species.\n",
|
||||
"AI: A new species of bird was recently discovered in the Amazon rainforest. The species, called the Spix's Macaw, is a small, blue parrot that is believed to be extinct in the wild. It is the first new species of bird to be discovered in the Amazon in over 100 years.\n",
|
||||
"Human: Tell me a children's story about the importance of honesty and trust.\n",
|
||||
"AI: Once upon a time, there was a young boy named Jack who lived in a small village. Jack was always honest and trustworthy, and his friends and family knew they could always count on him. One day, Jack was walking through the forest when he stumbled upon a magical tree. The tree told Jack that if he was honest and trustworthy, he would be rewarded with a special gift. Jack was so excited, and he promised to always be honest and trustworthy. Sure enough, the tree rewarded Jack with a beautiful golden apple. From that day forward, Jack was always honest and trustworthy, and he was rewarded with many more magical gifts. The moral of the story is that honesty and trust are the most important things in life.\n",
|
||||
"Human: Wow, Assistant, that was a really good story. Congratulations!\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
" Thank you! I'm glad you enjoyed it.\n",
|
||||
"listening now...\n",
|
||||
"Recognizing...\n",
|
||||
" Thank you.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.\n",
|
||||
"\n",
|
||||
"Human: Tell me a children's story about the importance of honesty and trust.\n",
|
||||
"AI: Once upon a time, there was a young boy named Jack who lived in a small village. Jack was always honest and trustworthy, and his friends and family knew they could always count on him. One day, Jack was walking through the forest when he stumbled upon a magical tree. The tree told Jack that if he was honest and trustworthy, he would be rewarded with a special gift. Jack was so excited, and he promised to always be honest and trustworthy. Sure enough, the tree rewarded Jack with a beautiful golden apple. From that day forward, Jack was always honest and trustworthy, and he was rewarded with many more magical gifts. The moral of the story is that honesty and trust are the most important things in life.\n",
|
||||
"Human: Wow, Assistant, that was a really good story. Congratulations!\n",
|
||||
"AI: Thank you! I'm glad you enjoyed it.\n",
|
||||
"Human: Thank you.\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
" You're welcome!\n",
|
||||
"listening now...\n",
|
||||
"Recognizing...\n",
|
||||
" Do you know of online brands like Photoshop and Freq that you don't have to download in some sort of way? Do you know of online brands like Photoshop and Freq that you don't have to download in some sort of way?\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.\n",
|
||||
"\n",
|
||||
"Human: Wow, Assistant, that was a really good story. Congratulations!\n",
|
||||
"AI: Thank you! I'm glad you enjoyed it.\n",
|
||||
"Human: Thank you.\n",
|
||||
"AI: You're welcome!\n",
|
||||
"Human: Do you know of online brands like Photoshop and Freq that you don't have to download in some sort of way? Do you know of online brands like Photoshop and Freq that you don't have to download in some sort of way?\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
" Yes, there are several online brands that offer photo editing and other creative tools without the need to download any software. Adobe Photoshop Express, Pixlr, and Fotor are some of the most popular online photo editing tools. Freq is an online music production platform that allows users to create and share music without downloading any software.\n",
|
||||
"listening now...\n",
|
||||
"Recognizing...\n",
|
||||
" Our whole process of awesome is free.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.\n",
|
||||
"\n",
|
||||
"Human: Thank you.\n",
|
||||
"AI: You're welcome!\n",
|
||||
"Human: Do you know of online brands like Photoshop and Freq that you don't have to download in some sort of way? Do you know of online brands like Photoshop and Freq that you don't have to download in some sort of way?\n",
|
||||
"AI: Yes, there are several online brands that offer photo editing and other creative tools without the need to download any software. Adobe Photoshop Express, Pixlr, and Fotor are some of the most popular online photo editing tools. Freq is an online music production platform that allows users to create and share music without downloading any software.\n",
|
||||
"Human: Our whole process of awesome is free.\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
" That's great! It's always nice to have access to free tools and resources.\n",
|
||||
"listening now...\n",
|
||||
"Recognizing...\n",
|
||||
" No, I meant to ask, are those options that you mentioned free? No, I meant to ask, are those options that you mentioned free?\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.\n",
|
||||
"\n",
|
||||
"Human: Do you know of online brands like Photoshop and Freq that you don't have to download in some sort of way? Do you know of online brands like Photoshop and Freq that you don't have to download in some sort of way?\n",
|
||||
"AI: Yes, there are several online brands that offer photo editing and other creative tools without the need to download any software. Adobe Photoshop Express, Pixlr, and Fotor are some of the most popular online photo editing tools. Freq is an online music production platform that allows users to create and share music without downloading any software.\n",
|
||||
"Human: Our whole process of awesome is free.\n",
|
||||
"AI: That's great! It's always nice to have access to free tools and resources.\n",
|
||||
"Human: No, I meant to ask, are those options that you mentioned free? No, I meant to ask, are those options that you mentioned free?\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
" Yes, the online brands I mentioned are all free to use. Adobe Photoshop Express, Pixlr, and Fotor are all free to use, and Freq is a free music production platform.\n",
|
||||
"listening now...\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"ename": "KeyboardInterrupt",
|
||||
"evalue": "",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[1;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
|
||||
"Cell \u001b[1;32mIn[6], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m listen(\u001b[39mNone\u001b[39;49;00m)\n",
|
||||
"Cell \u001b[1;32mIn[5], line 20\u001b[0m, in \u001b[0;36mlisten\u001b[1;34m(command_queue)\u001b[0m\n\u001b[0;32m 18\u001b[0m \u001b[39mprint\u001b[39m(\u001b[39m'\u001b[39m\u001b[39mlistening now...\u001b[39m\u001b[39m'\u001b[39m)\n\u001b[0;32m 19\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[1;32m---> 20\u001b[0m audio \u001b[39m=\u001b[39m r\u001b[39m.\u001b[39;49mlisten(source, timeout\u001b[39m=\u001b[39;49m\u001b[39m5\u001b[39;49m, phrase_time_limit\u001b[39m=\u001b[39;49m\u001b[39m30\u001b[39;49m)\n\u001b[0;32m 21\u001b[0m \u001b[39m# audio = r.record(source,duration = 5)\u001b[39;00m\n\u001b[0;32m 22\u001b[0m \u001b[39mprint\u001b[39m(\u001b[39m'\u001b[39m\u001b[39mRecognizing...\u001b[39m\u001b[39m'\u001b[39m)\n",
|
||||
"File \u001b[1;32mc:\\ProgramData\\miniconda3\\envs\\lang\\lib\\site-packages\\speech_recognition\\__init__.py:523\u001b[0m, in \u001b[0;36mRecognizer.listen\u001b[1;34m(self, source, timeout, phrase_time_limit, snowboy_configuration)\u001b[0m\n\u001b[0;32m 520\u001b[0m \u001b[39mif\u001b[39;00m phrase_time_limit \u001b[39mand\u001b[39;00m elapsed_time \u001b[39m-\u001b[39m phrase_start_time \u001b[39m>\u001b[39m phrase_time_limit:\n\u001b[0;32m 521\u001b[0m \u001b[39mbreak\u001b[39;00m\n\u001b[1;32m--> 523\u001b[0m buffer \u001b[39m=\u001b[39m source\u001b[39m.\u001b[39;49mstream\u001b[39m.\u001b[39;49mread(source\u001b[39m.\u001b[39;49mCHUNK)\n\u001b[0;32m 524\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mlen\u001b[39m(buffer) \u001b[39m==\u001b[39m \u001b[39m0\u001b[39m: \u001b[39mbreak\u001b[39;00m \u001b[39m# reached end of the stream\u001b[39;00m\n\u001b[0;32m 525\u001b[0m frames\u001b[39m.\u001b[39mappend(buffer)\n",
|
||||
"File \u001b[1;32mc:\\ProgramData\\miniconda3\\envs\\lang\\lib\\site-packages\\speech_recognition\\__init__.py:199\u001b[0m, in \u001b[0;36mMicrophone.MicrophoneStream.read\u001b[1;34m(self, size)\u001b[0m\n\u001b[0;32m 198\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mread\u001b[39m(\u001b[39mself\u001b[39m, size):\n\u001b[1;32m--> 199\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mpyaudio_stream\u001b[39m.\u001b[39;49mread(size, exception_on_overflow\u001b[39m=\u001b[39;49m\u001b[39mFalse\u001b[39;49;00m)\n",
|
||||
"File \u001b[1;32mc:\\ProgramData\\miniconda3\\envs\\lang\\lib\\site-packages\\pyaudio\\__init__.py:570\u001b[0m, in \u001b[0;36mPyAudio.Stream.read\u001b[1;34m(self, num_frames, exception_on_overflow)\u001b[0m\n\u001b[0;32m 567\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_is_input:\n\u001b[0;32m 568\u001b[0m \u001b[39mraise\u001b[39;00m \u001b[39mIOError\u001b[39;00m(\u001b[39m\"\u001b[39m\u001b[39mNot input stream\u001b[39m\u001b[39m\"\u001b[39m,\n\u001b[0;32m 569\u001b[0m paCanNotReadFromAnOutputOnlyStream)\n\u001b[1;32m--> 570\u001b[0m \u001b[39mreturn\u001b[39;00m pa\u001b[39m.\u001b[39;49mread_stream(\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_stream, num_frames,\n\u001b[0;32m 571\u001b[0m exception_on_overflow)\n",
|
||||
"\u001b[1;31mKeyboardInterrupt\u001b[0m: "
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"listen(None)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "lang",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.10"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
597
docs/extras/use_cases/extraction.ipynb
Normal file
@@ -0,0 +1,597 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b84edb4e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Extraction\n",
|
||||
"\n",
|
||||
"[](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/extras/use_cases/extraction/extraction.ipynb)\n",
|
||||
"\n",
|
||||
"## Use case\n",
|
||||
"\n",
|
||||
"Getting structured output from raw LLM generations is hard.\n",
|
||||
"\n",
|
||||
"For example, suppose you need the model output formatted with a specific schema for:\n",
|
||||
"\n",
|
||||
"- Extracting a structured row to insert into a database \n",
|
||||
"- Extracting API parameters\n",
|
||||
"- Extracting different parts of a user query (e.g., for semantic vs keyword search)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "97f474d4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Overview \n",
|
||||
"\n",
|
||||
"There are two primary approaches for this:\n",
|
||||
"\n",
|
||||
"- `Functions`: Some LLMs can call [functions](https://openai.com/blog/function-calling-and-other-api-updates) to extract arbitrary entities from LLM responses.\n",
|
||||
"\n",
|
||||
"- `Parsing`: [Output parsers](/docs/modules/model_io/output_parsers/) are classes that structure LLM responses. \n",
|
||||
"\n",
|
||||
"Only some LLMs support functions (e.g., OpenAI), and they are more general than parsers. \n",
|
||||
"\n",
|
||||
"Parsers extract precisely what is enumerated in a provided schema (e.g., specific attributes of a person).\n",
|
||||
"\n",
|
||||
"Functions can infer things beyond of a provided schema (e.g., attributes about a person that you did not ask for)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "25d89f21",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Quickstart\n",
|
||||
"\n",
|
||||
"OpenAI funtions are one way to get started with extraction.\n",
|
||||
"\n",
|
||||
"Define a schema that specifies the properties we want to extract from the LLM output.\n",
|
||||
"\n",
|
||||
"Then, we can use `create_extraction_chain` to extract our desired schema using an OpenAI function call."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3f5ec7a3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pip install langchain openai \n",
|
||||
"\n",
|
||||
"# Set env var OPENAI_API_KEY or load from a .env file:\n",
|
||||
"# import dotenv\n",
|
||||
"# dotenv.load_env()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "3e017ba0",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'Alex', 'height': 5, 'hair_color': 'blonde'},\n",
|
||||
" {'name': 'Claudia', 'height': 6, 'hair_color': 'brunette'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.chains import create_extraction_chain\n",
|
||||
"\n",
|
||||
"# Schema\n",
|
||||
"schema = {\n",
|
||||
" \"properties\": {\n",
|
||||
" \"name\": {\"type\": \"string\"},\n",
|
||||
" \"height\": {\"type\": \"integer\"},\n",
|
||||
" \"hair_color\": {\"type\": \"string\"},\n",
|
||||
" },\n",
|
||||
" \"required\": [\"name\", \"height\"],\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"# Input \n",
|
||||
"inp = \"\"\"Alex is 5 feet tall. Claudia is 1 feet taller Alex and jumps higher than him. Claudia is a brunette and Alex is blonde.\"\"\"\n",
|
||||
"\n",
|
||||
"# Run chain\n",
|
||||
"llm = ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo\")\n",
|
||||
"chain = create_extraction_chain(schema, llm)\n",
|
||||
"chain.run(inp)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6f7eb826",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Option 1: OpenAI funtions\n",
|
||||
"\n",
|
||||
"### Looking under the hood\n",
|
||||
"\n",
|
||||
"Let's dig into what is happening when we call `create_extraction_chain`.\n",
|
||||
"\n",
|
||||
"The [LangSmith trace](https://smith.langchain.com/public/72bc3205-7743-4ca6-929a-966a9d4c2a77/r) shows that we call the function `information_extraction` on the input string, `inp`.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"This `information_extraction` function is defined [here](https://github.com/langchain-ai/langchain/blob/master/libs/langchain/langchain/chains/openai_functions/extraction.py) and returns a dict.\n",
|
||||
"\n",
|
||||
"We can see the `dict` in the model output:\n",
|
||||
"```\n",
|
||||
" {\n",
|
||||
" \"info\": [\n",
|
||||
" {\n",
|
||||
" \"name\": \"Alex\",\n",
|
||||
" \"height\": 5,\n",
|
||||
" \"hair_color\": \"blonde\"\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"name\": \"Claudia\",\n",
|
||||
" \"height\": 6,\n",
|
||||
" \"hair_color\": \"brunette\"\n",
|
||||
" }\n",
|
||||
" ]\n",
|
||||
" }\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"The `create_extraction_chain` then parses the raw LLM output for us using [`JsonKeyOutputFunctionsParser`](https://github.com/langchain-ai/langchain/blob/f81e613086d211327b67b0fb591fd4d5f9a85860/libs/langchain/langchain/chains/openai_functions/extraction.py#L62).\n",
|
||||
"\n",
|
||||
"This results in the list of JSON objects returned by the chain above:\n",
|
||||
"```\n",
|
||||
"[{'name': 'Alex', 'height': 5, 'hair_color': 'blonde'},\n",
|
||||
" {'name': 'Claudia', 'height': 6, 'hair_color': 'brunette'}]\n",
|
||||
" ```"
|
||||
]
|
||||
},
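{
"cell_type": "markdown",
"id": "b2f1c0aa",
"metadata": {},
"source": [
"To see that parsing step in isolation, you can apply the same parser to a hand-built function-call message. Below is a minimal sketch: the `AIMessage` is constructed by hand purely for illustration, whereas a real message would come back from the chat model."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c3a2d1bb",
"metadata": {},
"outputs": [],
"source": [
"from langchain.output_parsers.openai_functions import JsonKeyOutputFunctionsParser\n",
"from langchain.schema import AIMessage, ChatGeneration\n",
"\n",
"# Hand-built stand-in for a model response that called\n",
"# `information_extraction` (for illustration only)\n",
"message = AIMessage(\n",
"    content=\"\",\n",
"    additional_kwargs={\n",
"        \"function_call\": {\n",
"            \"name\": \"information_extraction\",\n",
"            \"arguments\": '{\"info\": [{\"name\": \"Alex\", \"height\": 5, \"hair_color\": \"blonde\"}]}',\n",
"        }\n",
"    },\n",
")\n",
"\n",
"# The same parser the chain uses: it pulls the list out from\n",
"# under the \"info\" key of the function-call arguments\n",
"parser = JsonKeyOutputFunctionsParser(key_name=\"info\")\n",
"parser.parse_result([ChatGeneration(message=message)])"
]
},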
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "dcb03138",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Multiple entity types\n",
|
||||
"\n",
|
||||
"We can extend this further.\n",
|
||||
"\n",
|
||||
"Let's say we want to differentiate between dogs and people.\n",
|
||||
"\n",
|
||||
"We can add `person_` and `dog_` prefixes for each property"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "01eae733",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'person_name': 'Alex',\n",
|
||||
" 'person_height': 5,\n",
|
||||
" 'person_hair_color': 'blonde',\n",
|
||||
" 'dog_name': 'Frosty',\n",
|
||||
" 'dog_breed': 'labrador'},\n",
|
||||
" {'person_name': 'Claudia',\n",
|
||||
" 'person_height': 6,\n",
|
||||
" 'person_hair_color': 'brunette'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"schema = {\n",
|
||||
" \"properties\": {\n",
|
||||
" \"person_name\": {\"type\": \"string\"},\n",
|
||||
" \"person_height\": {\"type\": \"integer\"},\n",
|
||||
" \"person_hair_color\": {\"type\": \"string\"},\n",
|
||||
" \"dog_name\": {\"type\": \"string\"},\n",
|
||||
" \"dog_breed\": {\"type\": \"string\"},\n",
|
||||
" },\n",
|
||||
" \"required\": [\"person_name\", \"person_height\"],\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"chain = create_extraction_chain(schema, llm)\n",
|
||||
"\n",
|
||||
"inp = \"\"\"Alex is 5 feet tall. Claudia is 1 feet taller Alex and jumps higher than him. Claudia is a brunette and Alex is blonde.\n",
|
||||
"Alex's dog Frosty is a labrador and likes to play hide and seek.\"\"\"\n",
|
||||
"\n",
|
||||
"chain.run(inp)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f205905c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Unrelated entities\n",
|
||||
"\n",
|
||||
"If we use `required: []`, we allow the model to return **only** person attributes or **only** dog attributes for a single entity (person or dog)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "6ff4ac7e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'person_name': 'Alex', 'person_height': 5, 'person_hair_color': 'blonde'},\n",
|
||||
" {'person_name': 'Claudia',\n",
|
||||
" 'person_height': 6,\n",
|
||||
" 'person_hair_color': 'brunette'},\n",
|
||||
" {'dog_name': 'Willow', 'dog_breed': 'German Shepherd'},\n",
|
||||
" {'dog_name': 'Milo', 'dog_breed': 'border collie'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"schema = {\n",
|
||||
" \"properties\": {\n",
|
||||
" \"person_name\": {\"type\": \"string\"},\n",
|
||||
" \"person_height\": {\"type\": \"integer\"},\n",
|
||||
" \"person_hair_color\": {\"type\": \"string\"},\n",
|
||||
" \"dog_name\": {\"type\": \"string\"},\n",
|
||||
" \"dog_breed\": {\"type\": \"string\"},\n",
|
||||
" },\n",
|
||||
" \"required\": [],\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"chain = create_extraction_chain(schema, llm)\n",
|
||||
"\n",
|
||||
"inp = \"\"\"Alex is 5 feet tall. Claudia is 1 feet taller Alex and jumps higher than him. Claudia is a brunette and Alex is blonde.\n",
|
||||
"Willow is a German Shepherd that likes to play with other dogs and can always be found playing with Milo, a border collie that lives close by.\"\"\"\n",
|
||||
"\n",
|
||||
"chain.run(inp)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "34f3b958",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Extra information\n",
|
||||
"\n",
|
||||
"The power of functions (relative to using parsers alone) lies in the ability to perform sematic extraction.\n",
|
||||
"\n",
|
||||
"In particular, `we can ask for things that are not explictly enumerated in the schema`.\n",
|
||||
"\n",
|
||||
"Suppose we want unspecified additional information about dogs. \n",
|
||||
"\n",
|
||||
"We can use add a placeholder for unstructured extraction, `dog_extra_info`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "40c7b26f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'person_name': 'Alex', 'person_height': 5, 'person_hair_color': 'blonde'},\n",
|
||||
" {'person_name': 'Claudia',\n",
|
||||
" 'person_height': 6,\n",
|
||||
" 'person_hair_color': 'brunette'},\n",
|
||||
" {'dog_name': 'Willow',\n",
|
||||
" 'dog_breed': 'German Shepherd',\n",
|
||||
" 'dog_extra_info': 'likes to play with other dogs'},\n",
|
||||
" {'dog_name': 'Milo',\n",
|
||||
" 'dog_breed': 'border collie',\n",
|
||||
" 'dog_extra_info': 'lives close by'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"schema = {\n",
|
||||
" \"properties\": {\n",
|
||||
" \"person_name\": {\"type\": \"string\"},\n",
|
||||
" \"person_height\": {\"type\": \"integer\"},\n",
|
||||
" \"person_hair_color\": {\"type\": \"string\"},\n",
|
||||
" \"dog_name\": {\"type\": \"string\"},\n",
|
||||
" \"dog_breed\": {\"type\": \"string\"},\n",
|
||||
" \"dog_extra_info\": {\"type\": \"string\"},\n",
|
||||
" },\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"chain = create_extraction_chain(schema, llm)\n",
|
||||
"chain.run(inp)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3a949c60",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This gives us additional information about the dogs."
|
||||
]
|
||||
},
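{
"cell_type": "markdown",
"id": "d4b3e2cc",
"metadata": {},
"source": [
"Property descriptions are another lightweight way to steer this. JSON Schema allows a `description` per property, and since the schema is embedded in the function definition sent to the model, the descriptions should reach the model as guidance. A sketch (the description wording is illustrative):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e5c4f3dd",
"metadata": {},
"outputs": [],
"source": [
"schema = {\n",
"    \"properties\": {\n",
"        \"dog_name\": {\"type\": \"string\"},\n",
"        \"dog_extra_info\": {\n",
"            # Illustrative description; passed through in the function definition\n",
"            \"type\": \"string\",\n",
"            \"description\": \"Any other detail about the dog, e.g. games it likes\",\n",
"        },\n",
"    },\n",
"}\n",
"\n",
"chain = create_extraction_chain(schema, llm)\n",
"chain.run(inp)"
]
},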
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "bf71ddce",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Pydantic \n",
|
||||
"\n",
|
||||
"Pydantic is a data validation and settings management library for Python. \n",
|
||||
"\n",
|
||||
"It allows you to create data classes with attributes that are automatically validated when you instantiate an object.\n",
|
||||
"\n",
|
||||
"Lets define a class with attributes annotated with types."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "d36a743b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Properties(person_name='Alex', person_height=5, person_hair_color='blonde', dog_breed=None, dog_name=None),\n",
|
||||
" Properties(person_name='Claudia', person_height=6, person_hair_color='brunette', dog_breed=None, dog_name=None)]"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from typing import Optional, List\n",
|
||||
"from pydantic import BaseModel, Field\n",
|
||||
"from langchain.chains import create_extraction_chain_pydantic\n",
|
||||
"\n",
|
||||
"# Pydantic data class\n",
|
||||
"class Properties(BaseModel):\n",
|
||||
" person_name: str\n",
|
||||
" person_height: int\n",
|
||||
" person_hair_color: str\n",
|
||||
" dog_breed: Optional[str]\n",
|
||||
" dog_name: Optional[str]\n",
|
||||
" \n",
|
||||
"# Extraction\n",
|
||||
"chain = create_extraction_chain_pydantic(pydantic_schema=Properties, llm=llm)\n",
|
||||
"\n",
|
||||
"# Run \n",
|
||||
"inp = \"\"\"Alex is 5 feet tall. Claudia is 1 feet taller Alex and jumps higher than him. Claudia is a brunette and Alex is blonde.\"\"\"\n",
|
||||
"chain.run(inp)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "07a0351a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"As we can see from the [trace](https://smith.langchain.com/public/fed50ae6-26bb-4235-a254-e0b7a229d10f/r), we use the function `information_extraction`, as above, with the Pydantic schema. "
|
||||
]
|
||||
},
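{
"cell_type": "markdown",
"id": "f6d5a4ee",
"metadata": {},
"source": [
"If you are curious what the model receives, Pydantic can render the class as a JSON schema itself; the chain performs a similar conversion internally when it builds the function definition. A quick sketch for inspection:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a7e6b5ff",
"metadata": {},
"outputs": [],
"source": [
"# Pydantic's own JSON-schema rendering of the class; the non-Optional\n",
"# fields show up under \"required\"\n",
"Properties.schema()"
]
},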
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "cbd9f121",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Option 2: Parsing\n",
|
||||
"\n",
|
||||
"[Output parsers](/docs/modules/model_io/output_parsers/) are classes that help structure language model responses. \n",
|
||||
"\n",
|
||||
"As shown above, they are used to parse the output of the OpenAI function calls in `create_extraction_chain`.\n",
|
||||
"\n",
|
||||
"But, they can be used independent of functions.\n",
|
||||
"\n",
|
||||
"### Pydantic\n",
|
||||
"\n",
|
||||
"Just as a above, let's parse a generation based on a Pydantic data class."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "64650362",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"People(people=[Person(person_name='Alex', person_height=5, person_hair_color='blonde', dog_breed=None, dog_name=None), Person(person_name='Claudia', person_height=6, person_hair_color='brunette', dog_breed=None, dog_name=None)])"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from typing import Sequence\n",
|
||||
"from langchain.prompts import (\n",
|
||||
" PromptTemplate,\n",
|
||||
" ChatPromptTemplate,\n",
|
||||
" HumanMessagePromptTemplate,\n",
|
||||
")\n",
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from pydantic import BaseModel, Field, validator\n",
|
||||
"from langchain.output_parsers import PydanticOutputParser\n",
|
||||
"\n",
|
||||
"class Person(BaseModel):\n",
|
||||
" person_name: str\n",
|
||||
" person_height: int\n",
|
||||
" person_hair_color: str\n",
|
||||
" dog_breed: Optional[str]\n",
|
||||
" dog_name: Optional[str]\n",
|
||||
"\n",
|
||||
"class People(BaseModel):\n",
|
||||
" \"\"\"Identifying information about all people in a text.\"\"\"\n",
|
||||
" people: Sequence[Person]\n",
|
||||
"\n",
|
||||
" \n",
|
||||
"# Run \n",
|
||||
"query = \"\"\"Alex is 5 feet tall. Claudia is 1 feet taller Alex and jumps higher than him. Claudia is a brunette and Alex is blonde.\"\"\"\n",
|
||||
"\n",
|
||||
"# Set up a parser + inject instructions into the prompt template.\n",
|
||||
"parser = PydanticOutputParser(pydantic_object=People)\n",
|
||||
"\n",
|
||||
"# Prompt\n",
|
||||
"prompt = PromptTemplate(\n",
|
||||
" template=\"Answer the user query.\\n{format_instructions}\\n{query}\\n\",\n",
|
||||
" input_variables=[\"query\"],\n",
|
||||
" partial_variables={\"format_instructions\": parser.get_format_instructions()},\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Run\n",
|
||||
"_input = prompt.format_prompt(query=query)\n",
|
||||
"model = OpenAI(temperature=0)\n",
|
||||
"output = model(_input.to_string())\n",
|
||||
"parser.parse(output)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "826899df",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can see from the [LangSmith trace](https://smith.langchain.com/public/8e3aa858-467e-46a5-aa49-5db65f0a2b9a/r) that we get the same output as above.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"We can see that we provide a two-shot prompt in order to instruct the LLM to output in our desired format.\n",
|
||||
"\n",
|
||||
"And, we need to do a bit more work:\n",
|
||||
"\n",
|
||||
"* Define a class that holds multiple instances of `Person`\n",
|
||||
"* Explicty parse the output of the LLM to the Pydantic class\n",
|
||||
"\n",
|
||||
"We can see this for other cases, too."
|
||||
]
|
||||
},
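{
"cell_type": "markdown",
"id": "b8f7c6aa",
"metadata": {},
"source": [
"`OutputFixingParser` wraps another parser and, when a parse fails, asks an LLM to repair the malformed output before retrying. A minimal sketch, reusing the `People` parser from above; the malformed string is hand-written for illustration:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c9a8d7bb",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.output_parsers import OutputFixingParser\n",
"\n",
"# On a parse failure, the wrapped LLM is asked to fix the output\n",
"fixing_parser = OutputFixingParser.from_llm(parser=parser, llm=ChatOpenAI(temperature=0))\n",
"\n",
"# Single quotes make this invalid JSON, so the bare parser would raise\n",
"fixing_parser.parse(\n",
"    \"{'people': [{'person_name': 'Alex', 'person_height': 5, 'person_hair_color': 'blonde'}]}\"\n",
")"
]
},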
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "837c350e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Joke(setup='Why did the chicken cross the road?', punchline='To get to the other side!')"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.prompts import (\n",
|
||||
" PromptTemplate,\n",
|
||||
" ChatPromptTemplate,\n",
|
||||
" HumanMessagePromptTemplate,\n",
|
||||
")\n",
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from pydantic import BaseModel, Field, validator\n",
|
||||
"from langchain.output_parsers import PydanticOutputParser\n",
|
||||
"\n",
|
||||
"# Define your desired data structure.\n",
|
||||
"class Joke(BaseModel):\n",
|
||||
" setup: str = Field(description=\"question to set up a joke\")\n",
|
||||
" punchline: str = Field(description=\"answer to resolve the joke\")\n",
|
||||
"\n",
|
||||
" # You can add custom validation logic easily with Pydantic.\n",
|
||||
" @validator(\"setup\")\n",
|
||||
" def question_ends_with_question_mark(cls, field):\n",
|
||||
" if field[-1] != \"?\":\n",
|
||||
" raise ValueError(\"Badly formed question!\")\n",
|
||||
" return field\n",
|
||||
"\n",
|
||||
"# And a query intented to prompt a language model to populate the data structure.\n",
|
||||
"joke_query = \"Tell me a joke.\"\n",
|
||||
"\n",
|
||||
"# Set up a parser + inject instructions into the prompt template.\n",
|
||||
"parser = PydanticOutputParser(pydantic_object=Joke)\n",
|
||||
"\n",
|
||||
"# Prompt\n",
|
||||
"prompt = PromptTemplate(\n",
|
||||
" template=\"Answer the user query.\\n{format_instructions}\\n{query}\\n\",\n",
|
||||
" input_variables=[\"query\"],\n",
|
||||
" partial_variables={\"format_instructions\": parser.get_format_instructions()},\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Run\n",
|
||||
"_input = prompt.format_prompt(query=joke_query)\n",
|
||||
"model = OpenAI(temperature=0)\n",
|
||||
"output = model(_input.to_string())\n",
|
||||
"parser.parse(output)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d3601bde",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"As we can see, we get an output of the `Joke` class, which respects our originally desired schema: 'setup' and 'punchline'.\n",
|
||||
"\n",
|
||||
"We can look at the [LangSmith trace](https://smith.langchain.com/public/69f11d41-41be-4319-93b0-6d0eda66e969/r) to see exactly what is going on under the hood.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"### Going deeper\n",
|
||||
"\n",
|
||||
"* The [output parser](/docs/modules/model_io/output_parsers/) documentation includes various parser examples for specific types (e.g., lists, datetimne, enum, etc). \n",
|
||||
"* [JSONFormer](/docs/integrations/llms/jsonformer_experimental) offers another way for structured decoding of a subset of the JSON Schema.\n",
|
||||
"* [Kor](https://eyurtsev.github.io/kor/) is another library for extraction where schema and examples can be provided to the LLM."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.16"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,24 +0,0 @@
|
||||
---
|
||||
sidebar_position: 2
|
||||
---
|
||||
|
||||
# Extraction
|
||||
|
||||
Most APIs and databases still deal with structured information.
|
||||
Therefore, in order to better work with those, it can be useful to extract structured information from text.
|
||||
Examples of this include:
|
||||
|
||||
- Extracting a structured row to insert into a database from a sentence
|
||||
- Extracting multiple rows to insert into a database from a long document
|
||||
- Extracting the correct API parameters from a user query
|
||||
|
||||
This work is closely related to [output parsing](/docs/modules/model_io/output_parsers/).
|
||||
Output parsers are responsible for instructing the LLM to respond in a specific format.
|
||||
In this case, the output parsers specify the format of the data you would like to extract from the document.
|
||||
Then, in addition to the output format instructions, the prompt should also contain the data you would like to extract information from.
|
||||
|
||||
While normal output parsers are good enough for basic structuring of response data,
|
||||
when doing extraction you often want to extract more complicated or nested structures.
|
||||
For a deep dive on extraction, we recommend checking out [`kor`](https://eyurtsev.github.io/kor/),
|
||||
a library that uses the existing LangChain chain and OutputParser abstractions
|
||||
but focuses on enabling extraction of more complicated schemas.
|
||||
@@ -1,566 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6605e7f7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Extraction with OpenAI Functions\n",
|
||||
"\n",
|
||||
"The extraction chain uses the OpenAI `functions` parameter to specify a schema to extract entities from a document. This helps us make sure that the model outputs exactly the schema of entities and properties that we want, with their appropriate types.\n",
|
||||
"\n",
|
||||
"The extraction chain is to be used when we want to extract several entities with their properties from the same passage (i.e. what people were mentioned in this passage?)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "34f04daf",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/Users/harrisonchase/.pyenv/versions/3.9.1/envs/langchain/lib/python3.9/site-packages/deeplake/util/check_latest_version.py:32: UserWarning: A newer version of deeplake (3.6.4) is available. It's recommended that you update to the latest version using `pip install -U deeplake`.\n",
|
||||
" warnings.warn(\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.chains import create_extraction_chain, create_extraction_chain_pydantic\n",
|
||||
"from langchain.prompts import ChatPromptTemplate"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "a2648974",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo-0613\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5ef034ce",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Extracting entities"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "78ff9df9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To extract entities, we need to create a schema where we specify all the properties we want to find and the type we expect them to have. We can also specify which of these properties are required and which are optional."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "4ac43eba",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"schema = {\n",
|
||||
" \"properties\": {\n",
|
||||
" \"name\": {\"type\": \"string\"},\n",
|
||||
" \"height\": {\"type\": \"integer\"},\n",
|
||||
" \"hair_color\": {\"type\": \"string\"},\n",
|
||||
" },\n",
|
||||
" \"required\": [\"name\", \"height\"],\n",
|
||||
"}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "640bd005",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"inp = \"\"\"\n",
|
||||
"Alex is 5 feet tall. Claudia is 1 feet taller Alex and jumps higher than him. Claudia is a brunette and Alex is blonde.\n",
|
||||
" \"\"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "64313214",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = create_extraction_chain(schema, llm)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "17c48adb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"As we can see, we extracted the required entities and their properties in the required format (it even calculated Claudia's height before returning!)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "cc5436ed",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'Alex', 'height': 5, 'hair_color': 'blonde'},\n",
|
||||
" {'name': 'Claudia', 'height': 6, 'hair_color': 'brunette'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.run(inp)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8d51fcdc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Several entity types"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5813affe",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Notice that we are using OpenAI functions under the hood and thus the model can only call one function per request (with one, unique schema)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "511b9838",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If we want to extract more than one entity type, we need to introduce a little hack - we will define our properties with an included entity type. \n",
|
||||
"\n",
|
||||
"Following we have an example where we also want to extract dog attributes from the passage. Notice the 'person_' and 'dog_' prefixes we use for each property; this tells the model which entity type the property refers to. In this way, the model can return properties from several entity types in one single call."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "cf243a26",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"schema = {\n",
|
||||
" \"properties\": {\n",
|
||||
" \"person_name\": {\"type\": \"string\"},\n",
|
||||
" \"person_height\": {\"type\": \"integer\"},\n",
|
||||
" \"person_hair_color\": {\"type\": \"string\"},\n",
|
||||
" \"dog_name\": {\"type\": \"string\"},\n",
|
||||
" \"dog_breed\": {\"type\": \"string\"},\n",
|
||||
" },\n",
|
||||
" \"required\": [\"person_name\", \"person_height\"],\n",
|
||||
"}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "52841fb3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"inp = \"\"\"\n",
|
||||
"Alex is 5 feet tall. Claudia is 1 feet taller Alex and jumps higher than him. Claudia is a brunette and Alex is blonde.\n",
|
||||
"Alex's dog Frosty is a labrador and likes to play hide and seek.\n",
|
||||
" \"\"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "93f904ab",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = create_extraction_chain(schema, llm)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "eb074f7b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"People attributes and dog attributes were correctly extracted from the text in the same call"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "db3e9e17",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'person_name': 'Alex',\n",
|
||||
" 'person_height': 5,\n",
|
||||
" 'person_hair_color': 'blonde',\n",
|
||||
" 'dog_name': 'Frosty',\n",
|
||||
" 'dog_breed': 'labrador'},\n",
|
||||
" {'person_name': 'Claudia',\n",
|
||||
" 'person_height': 6,\n",
|
||||
" 'person_hair_color': 'brunette'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.run(inp)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0273e0e2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Unrelated entities"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c07b3480",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"What if our entities are unrelated? In that case, the model will return the unrelated entities in different dictionaries, allowing us to successfully extract several unrelated entity types in the same call."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "01d98af0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Notice that we use `required: []`: we need to allow the model to return **only** person attributes or **only** dog attributes for a single entity (person or dog)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 48,
|
||||
"id": "e584c993",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"schema = {\n",
|
||||
" \"properties\": {\n",
|
||||
" \"person_name\": {\"type\": \"string\"},\n",
|
||||
" \"person_height\": {\"type\": \"integer\"},\n",
|
||||
" \"person_hair_color\": {\"type\": \"string\"},\n",
|
||||
" \"dog_name\": {\"type\": \"string\"},\n",
|
||||
" \"dog_breed\": {\"type\": \"string\"},\n",
|
||||
" },\n",
|
||||
" \"required\": [],\n",
|
||||
"}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 49,
|
||||
"id": "ad6b105f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"inp = \"\"\"\n",
|
||||
"Alex is 5 feet tall. Claudia is 1 feet taller Alex and jumps higher than him. Claudia is a brunette and Alex is blonde.\n",
|
||||
"\n",
|
||||
"Willow is a German Shepherd that likes to play with other dogs and can always be found playing with Milo, a border collie that lives close by.\n",
|
||||
"\"\"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 50,
|
||||
"id": "6bfe5a33",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = create_extraction_chain(schema, llm)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "24fe09af",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We have each entity in its own separate dictionary, with only the appropriate attributes being returned"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 51,
|
||||
"id": "f6e1fd89",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'person_name': 'Alex', 'person_height': 5, 'person_hair_color': 'blonde'},\n",
|
||||
" {'person_name': 'Claudia',\n",
|
||||
" 'person_height': 6,\n",
|
||||
" 'person_hair_color': 'brunette'},\n",
|
||||
" {'dog_name': 'Willow', 'dog_breed': 'German Shepherd'},\n",
|
||||
" {'dog_name': 'Milo', 'dog_breed': 'border collie'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 51,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.run(inp)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0ac466d1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Extra info for an entity"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d240ffc1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"What if.. _we don't know what we want?_ More specifically, say we know a few properties we want to extract for a given entity but we also want to know if there's any extra information in the passage. Fortunately, we don't need to structure everything - we can have unstructured extraction as well. \n",
|
||||
"\n",
|
||||
"We can do this by introducing another hack, namely the *extra_info* attribute - let's see an example."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 68,
|
||||
"id": "f19685f6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"schema = {\n",
|
||||
" \"properties\": {\n",
|
||||
" \"person_name\": {\"type\": \"string\"},\n",
|
||||
" \"person_height\": {\"type\": \"integer\"},\n",
|
||||
" \"person_hair_color\": {\"type\": \"string\"},\n",
|
||||
" \"dog_name\": {\"type\": \"string\"},\n",
|
||||
" \"dog_breed\": {\"type\": \"string\"},\n",
|
||||
" \"dog_extra_info\": {\"type\": \"string\"},\n",
|
||||
" },\n",
|
||||
"}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 81,
|
||||
"id": "200c3477",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"inp = \"\"\"\n",
|
||||
"Alex is 5 feet tall. Claudia is 1 feet taller Alex and jumps higher than him. Claudia is a brunette and Alex is blonde.\n",
|
||||
"\n",
|
||||
"Willow is a German Shepherd that likes to play with other dogs and can always be found playing with Milo, a border collie that lives close by.\n",
|
||||
"\"\"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 82,
|
||||
"id": "ddad7dc6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = create_extraction_chain(schema, llm)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e5c0dbbc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"It is nice to know more about Willow and Milo!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 83,
|
||||
"id": "c22cfd30",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'person_name': 'Alex', 'person_height': 5, 'person_hair_color': 'blonde'},\n",
|
||||
" {'person_name': 'Claudia',\n",
|
||||
" 'person_height': 6,\n",
|
||||
" 'person_hair_color': 'brunette'},\n",
|
||||
" {'dog_name': 'Willow',\n",
|
||||
" 'dog_breed': 'German Shepherd',\n",
|
||||
" 'dog_extra_information': 'likes to play with other dogs'},\n",
|
||||
" {'dog_name': 'Milo',\n",
|
||||
" 'dog_breed': 'border collie',\n",
|
||||
" 'dog_extra_information': 'lives close by'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 83,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.run(inp)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "698b4c4d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Pydantic example"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6504a6d9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also use a Pydantic schema to choose the required properties and types and we will set as 'Optional' those that are not strictly required.\n",
|
||||
"\n",
|
||||
"By using the `create_extraction_chain_pydantic` function, we can send a Pydantic schema as input and the output will be an instantiated object that respects our desired schema. \n",
|
||||
"\n",
|
||||
"In this way, we can specify our schema in the same manner that we would a new class or function in Python - with purely Pythonic types."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "6792866b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import Optional, List\n",
|
||||
"from pydantic import BaseModel, Field"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "36a63761",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class Properties(BaseModel):\n",
|
||||
" person_name: str\n",
|
||||
" person_height: int\n",
|
||||
" person_hair_color: str\n",
|
||||
" dog_breed: Optional[str]\n",
|
||||
" dog_name: Optional[str]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "8ffd1e57",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = create_extraction_chain_pydantic(pydantic_schema=Properties, llm=llm)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "24baa954",
|
||||
"metadata": {
|
||||
"scrolled": false
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"inp = \"\"\"\n",
|
||||
"Alex is 5 feet tall. Claudia is 1 feet taller Alex and jumps higher than him. Claudia is a brunette and Alex is blonde.\n",
|
||||
"Alex's dog Frosty is a labrador and likes to play hide and seek.\n",
|
||||
" \"\"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "84e0a241",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"As we can see, we extracted the required entities and their properties in the required format:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "f771df58",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Properties(person_name='Alex', person_height=5, person_hair_color='blonde', dog_breed='labrador', dog_name='Frosty'),\n",
|
||||
" Properties(person_name='Claudia', person_height=6, person_hair_color='brunette', dog_breed=None, dog_name=None)]"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.run(inp)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "0df61283",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
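For reference, the notebook's Pydantic flow also reads naturally as a standalone script - a sketch, assuming `OPENAI_API_KEY` is set and a function-calling model (the `gpt-3.5-turbo-0613` model name here is an assumption, not taken from the cells above):

```python
from typing import Optional

from pydantic import BaseModel

from langchain.chains import create_extraction_chain_pydantic
from langchain.chat_models import ChatOpenAI


class Properties(BaseModel):
    person_name: str
    person_height: int
    person_hair_color: str
    dog_breed: Optional[str]
    dog_name: Optional[str]


# A function-calling model is required for the extraction chains
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
chain = create_extraction_chain_pydantic(pydantic_schema=Properties, llm=llm)

inp = """
Alex is 5 feet tall. Claudia is 1 foot taller than Alex and jumps higher than him.
Claudia is a brunette and Alex is blonde.
Alex's dog Frosty is a labrador and likes to play hide and seek.
"""

# Returns a list of Properties instances, as in the notebook output
print(chain.run(inp))
```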
@@ -138,9 +138,9 @@ len(docs_svm)
|
||||
4
|
||||
|
||||
Some common ways to improve on vector similarity search include:
|
||||
- `MultiQueryRetriever` [generates variants of the input question](/docs/modules/data_connection/retrievers/how_to/MultiQueryRetriever) to improve retrieval.
|
||||
- `MultiQueryRetriever` [generates variants of the input question](/docs/modules/data_connection/retrievers/MultiQueryRetriever) to improve retrieval.
|
||||
- `Max marginal relevance` selects for [relevance and diversity](https://www.cs.cmu.edu/~jgc/publication/The_Use_MMR_Diversity_Based_LTMIR_1998.pdf) among the retrieved documents.
|
||||
- Documents can be filtered during retrieval using [`metadata` filters](/docs/use_cases/question_answering/document-context-aware-QA).
|
||||
- Documents can be filtered during retrieval using [`metadata` filters](/docs/use_cases/question_answering/how_to/document-context-aware-QA).
|
||||
|
||||
|
||||
```python
|
||||
|
||||
@@ -117,3 +117,38 @@ qa.run(query)
|
||||
```
|
||||
|
||||
</CodeOutputBlock>
|
||||
|
||||
## Vectorstore Retriever Options
|
||||
You can adjust how documents are retrieved from your vectorstore depending on the specific task.
|
||||
|
||||
There are two main ways to retrieve documents relevant to a query: Similarity Search and Max Marginal Relevance (MMR) Search. Similarity Search is the default, but you can use MMR by adding the `search_type` parameter:
|
||||
|
||||
```python
|
||||
docsearch.as_retriever(search_type="mmr")
|
||||
```
|
||||
|
||||
You can also modify the search by passing specific search arguments through the retriever to the search function, using the `search_kwargs` keyword argument.
|
||||
|
||||
- `k` defines how many documents are returned; defaults to 4.
|
||||
- `score_threshold` allows you to set a minimum relevance for documents returned by the retriever, if you are using the "similarity_score_threshold" search type.
|
||||
- `fetch_k` determines the number of documents to pass to the MMR algorithm; defaults to 20.
|
||||
- `lambda_mult` controls the diversity of results returned by the MMR algorithm, with 1 being minimum diversity and 0 being maximum. Defaults to 0.5.
|
||||
- `filter` allows you to define a filter on what documents should be retrieved, based on the documents' metadata. This has no effect if the Vectorstore doesn't store any metadata.
|
||||
|
||||
Some examples of how these parameters can be used:
|
||||
```python
|
||||
# Retrieve more documents with higher diversity; useful if your dataset has many similar documents
|
||||
docsearch.as_retriever(search_type="mmr", search_kwargs={'k': 6, 'lambda_mult': 0.25})
|
||||
|
||||
# Fetch more documents for the MMR algorithm to consider, but only return the top 5
|
||||
docsearch.as_retriever(search_type="mmr", search_kwargs={'k': 5, 'fetch_k': 50})
|
||||
|
||||
# Only retrieve documents that have a relevance score above a certain threshold
|
||||
docsearch.as_retriever(search_type="similarity_score_threshold", search_kwargs={'score_threshold': 0.8})
|
||||
|
||||
# Only get the single most similar document from the dataset
|
||||
docsearch.as_retriever(search_kwargs={'k': 1})
|
||||
|
||||
# Use a filter to only retrieve documents from a specific paper
|
||||
docsearch.as_retriever(search_kwargs={'filter': {'paper_title':'GPT-4 Technical Report'}})
|
||||
```
|
||||
@@ -3,7 +3,7 @@ Additionally, we can return the source documents used to answer the question by
|
||||
|
||||
|
||||
```python
|
||||
qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type="stuff", retriever=docsearch.as_retriever(), return_source_documents=True)
|
||||
qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type="stuff", retriever=docsearch.as_retriever(search_type="mmr", search_kwargs={'fetch_k': 30}), return_source_documents=True)
|
||||
```
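With `return_source_documents=True` the chain returns a dict rather than a bare string. A usage sketch (the query text is illustrative):

```python
result = qa({"query": "What did the president say about the economy?"})
result["result"]            # the generated answer
result["source_documents"]  # the retrieved Documents behind it
```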
@@ -12,6 +12,7 @@ text_splitter = CharacterTextSplitter(
|
||||
chunk_size = 1000,
|
||||
chunk_overlap = 200,
|
||||
length_function = len,
|
||||
is_separator_regex = False,
|
||||
)
|
||||
```
|
||||
|
||||
|
||||
@@ -16,6 +16,7 @@ text_splitter = RecursiveCharacterTextSplitter(
|
||||
chunk_size = 100,
|
||||
chunk_overlap = 20,
|
||||
length_function = len,
|
||||
is_separator_regex = False,
|
||||
)
|
||||
```
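The new `is_separator_regex` flag controls whether `separator` is taken literally or as a regular expression. An illustrative sketch (the regex separator is an assumption, not from the snippet above):

```python
from langchain.text_splitter import CharacterTextSplitter

# Split on any run of two or more newlines, treated as a regex
text_splitter = CharacterTextSplitter(
    separator=r"\n{2,}",
    chunk_size=1000,
    chunk_overlap=200,
    length_function=len,
    is_separator_regex=True,
)
```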
@@ -3,7 +3,7 @@ We'll use a Pinecone vector store in this example.
|
||||
|
||||
First we'll want to create a `Pinecone` VectorStore and seed it with some data. We've created a small demo set of documents that contain summaries of movies.
|
||||
|
||||
To use Pinecone, you to have `pinecone` package installed and you must have an API key and an Environment. Here are the [installation instructions](https://docs.pinecone.io/docs/quickstart).
|
||||
To use Pinecone, you need to have `pinecone` package installed and you must have an API key and an Environment. Here are the [installation instructions](https://docs.pinecone.io/docs/quickstart).
|
||||
|
||||
NOTE: The self-query retriever requires you to have `lark` package installed.
|
||||
|
||||
@@ -85,7 +85,7 @@ retriever = SelfQueryRetriever.from_llm(llm, vectorstore, document_content_descr
|
||||
```
|
||||
|
||||
## Testing it out
|
||||
And now we can try actually using our retriever!
|
||||
And now we can actually try using our retriever!
|
||||
|
||||
|
||||
```python
|
||||
@@ -198,4 +198,4 @@ retriever = SelfQueryRetriever.from_llm(
|
||||
```python
|
||||
# This example only specifies a relevant query
|
||||
retriever.get_relevant_documents("What are two movies about dinosaurs")
|
||||
```
|
||||
```
|
||||
|
||||
@@ -1,140 +1,115 @@
|
||||
Here's the simplest example:
|
||||
Typically, language models expect the prompt to be either a string or a list of chat messages.
|
||||
|
||||
## Prompt template
|
||||
|
||||
Use `PromptTemplate` to create a template for a string prompt.
|
||||
|
||||
By default, `PromptTemplate` uses [Python's str.format](https://docs.python.org/3/library/stdtypes.html#str.format)
|
||||
syntax for templating; however, other templating syntaxes are available (e.g., `jinja2`).
|
||||
|
||||
```python
|
||||
from langchain import PromptTemplate
|
||||
|
||||
|
||||
template = """\
|
||||
You are a naming consultant for new companies.
|
||||
What is a good name for a company that makes {product}?
|
||||
"""
|
||||
|
||||
prompt = PromptTemplate.from_template(template)
|
||||
prompt.format(product="colorful socks")
|
||||
prompt_template = PromptTemplate.from_template(
|
||||
"Tell me a {adjective} joke about {content}."
|
||||
)
|
||||
prompt_template.format(adjective="funny", content="chickens")
|
||||
```
|
||||
|
||||
<CodeOutputBlock lang="python">
|
||||
|
||||
```
|
||||
You are a naming consultant for new companies.
|
||||
What is a good name for a company that makes colorful socks?
|
||||
"Tell me a funny joke about chickens."
|
||||
```
|
||||
|
||||
</CodeOutputBlock>
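As an illustrative aside (not part of the original page), the `jinja2` option mentioned above would look like this, assuming the `jinja2` package is installed:

```python
jinja2_prompt = PromptTemplate(
    input_variables=["adjective", "content"],
    template="Tell me a {{ adjective }} joke about {{ content }}.",
    template_format="jinja2",
)
jinja2_prompt.format(adjective="funny", content="chickens")
# -> "Tell me a funny joke about chickens."
```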
|
||||
|
||||
|
||||
## Create a prompt template
|
||||
|
||||
You can create simple hardcoded prompts using the `PromptTemplate` class. Prompt templates can take any number of input variables, and can be formatted to generate a prompt.
|
||||
|
||||
The template supports any number of variables, including no variables:
|
||||
|
||||
```python
|
||||
from langchain import PromptTemplate
|
||||
|
||||
# An example prompt with no input variables
|
||||
no_input_prompt = PromptTemplate(input_variables=[], template="Tell me a joke.")
|
||||
no_input_prompt.format()
|
||||
# -> "Tell me a joke."
|
||||
|
||||
# An example prompt with one input variable
|
||||
one_input_prompt = PromptTemplate(input_variables=["adjective"], template="Tell me a {adjective} joke.")
|
||||
one_input_prompt.format(adjective="funny")
|
||||
# -> "Tell me a funny joke."
|
||||
|
||||
# An example prompt with multiple input variables
|
||||
multiple_input_prompt = PromptTemplate(
|
||||
input_variables=["adjective", "content"],
|
||||
template="Tell me a {adjective} joke about {content}."
|
||||
prompt_template = PromptTemplate.from_template(
|
||||
"Tell me a joke"
|
||||
)
|
||||
multiple_input_prompt.format(adjective="funny", content="chickens")
|
||||
# -> "Tell me a funny joke about chickens."
|
||||
prompt_template.format()
|
||||
```
|
||||
|
||||
If you do not wish to specify `input_variables` manually, you can also create a `PromptTemplate` using `from_template` class method. `langchain` will automatically infer the `input_variables` based on the `template` passed.
|
||||
For additional validation, specify `input_variables` explicitly. These variables
|
||||
will be compared against the variables present in the template string during instantiation, raising an exception if
|
||||
there is a mismatch; for example,
|
||||
|
||||
```python
|
||||
template = "Tell me a {adjective} joke about {content}."
|
||||
from langchain import PromptTemplate
|
||||
|
||||
prompt_template = PromptTemplate.from_template(template)
|
||||
prompt_template.input_variables
|
||||
# -> ['adjective', 'content']
|
||||
prompt_template.format(adjective="funny", content="chickens")
|
||||
# -> Tell me a funny joke about chickens.
|
||||
invalid_prompt = PromptTemplate(
|
||||
input_variables=["adjective"],
|
||||
template="Tell me a {adjective} joke about {content}."
|
||||
)
|
||||
```
|
||||
|
||||
You can create custom prompt templates that format the prompt in any way you want. For more information, see [Custom Prompt Templates](./custom_prompt_template.html).
|
||||
|
||||
You can create custom prompt templates that format the prompt in any way you want.
|
||||
For more information, see [Custom Prompt Templates](./custom_prompt_template.html).
|
||||
|
||||
<!-- TODO(shreya): Add link to Jinja -->
|
||||
|
||||
## Chat prompt template
|
||||
|
||||
[Chat Models](../models/chat) take a list of chat messages as input - this list is commonly referred to as a `prompt`.
|
||||
These chat messages differ from raw string (which you would pass into a [LLM](/docs/modules/model_io/models/llms) model) in that every message is associated with a `role`.
|
||||
|
||||
For example, in the OpenAI [Chat Completion API](https://platform.openai.com/docs/guides/chat/introduction), a chat message can be associated with the AI, human, or system role. The model is supposed to follow instructions from the system chat message more closely.
|
||||
|
||||
LangChain provides several prompt templates to make constructing and working with prompts easy. You are encouraged to use these chat related prompt templates instead of `PromptTemplate` when querying chat models to fully utilize the potential of the underlying chat model.
|
||||
|
||||
|
||||
The prompt to [Chat Models](../models/chat) is a list of chat messages.
|
||||
|
||||
Each chat message is associated with content, and an additional parameter called `role`.
|
||||
For example, in the OpenAI [Chat Completions API](https://platform.openai.com/docs/guides/chat/introduction), a chat message can be associated with an AI assistant, a human or a system role.
|
||||
|
||||
Create a chat prompt template like this:
|
||||
|
||||
```python
|
||||
from langchain.prompts import (
|
||||
ChatPromptTemplate,
|
||||
PromptTemplate,
|
||||
SystemMessagePromptTemplate,
|
||||
AIMessagePromptTemplate,
|
||||
HumanMessagePromptTemplate,
|
||||
)
|
||||
from langchain.schema import (
|
||||
AIMessage,
|
||||
HumanMessage,
|
||||
SystemMessage
|
||||
from langchain.prompts import ChatPromptTemplate
|
||||
|
||||
template = ChatPromptTemplate.from_messages([
|
||||
("system", "You are a helpful AI bot. Your name is {name}."),
|
||||
("human", "Hello, how are you doing?"),
|
||||
("ai", "I'm doing well, thanks!"),
|
||||
("human", "{user_input}"),
|
||||
])
|
||||
|
||||
messages = template.format_messages(
|
||||
name="Bob",
|
||||
user_input="What is your name?"
|
||||
)
|
||||
```
|
||||
|
||||
To create a message template associated with a role, you use `MessagePromptTemplate`.
|
||||
|
||||
For convenience, there is a `from_template` method exposed on the template. If you were to use this template, this is what it would look like:
|
||||
`ChatPromptTemplate.from_messages` accepts a variety of message representations.
|
||||
|
||||
For example, in addition to using the 2-tuple representation of (type, content) used
|
||||
above, you could pass in an instance of `MessagePromptTemplate` or `BaseMessage`.
|
||||
|
||||
```python
|
||||
template="You are a helpful assistant that translates {input_language} to {output_language}."
|
||||
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
|
||||
human_template="{text}"
|
||||
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
|
||||
```
|
||||
from langchain.prompts import ChatPromptTemplate
|
||||
from langchain.prompts.chat import SystemMessage, HumanMessagePromptTemplate
|
||||
|
||||
If you wanted to construct the `MessagePromptTemplate` more directly, you could create a PromptTemplate outside and then pass it in, e.g.:
|
||||
|
||||
|
||||
```python
|
||||
prompt=PromptTemplate(
|
||||
template="You are a helpful assistant that translates {input_language} to {output_language}.",
|
||||
input_variables=["input_language", "output_language"],
|
||||
template = ChatPromptTemplate.from_messages(
|
||||
[
|
||||
SystemMessage(
|
||||
content=(
|
||||
"You are a helpful assistant that re-writes the user's text to "
|
||||
"sound more upbeat."
|
||||
)
|
||||
),
|
||||
HumanMessagePromptTemplate.from_template("{text}"),
|
||||
]
|
||||
)
|
||||
system_message_prompt_2 = SystemMessagePromptTemplate(prompt=prompt)
|
||||
|
||||
assert system_message_prompt == system_message_prompt_2
|
||||
```
|
||||
from langchain.chat_models import ChatOpenAI
|
||||
|
||||
After that, you can build a `ChatPromptTemplate` from one or more `MessagePromptTemplates`. You can use `ChatPromptTemplate`'s `format_prompt` -- this returns a `PromptValue`, which you can convert to a string or Message object, depending on whether you want to use the formatted value as input to an llm or chat model.
|
||||
|
||||
|
||||
```python
|
||||
chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
|
||||
|
||||
# get a chat completion from the formatted messages
|
||||
chat_prompt.format_prompt(input_language="English", output_language="French", text="I love programming.").to_messages()
|
||||
llm = ChatOpenAI()
|
||||
llm(template.format_messages(text='i dont like eating tasty things.'))
|
||||
```
|
||||
|
||||
<CodeOutputBlock lang="python">
|
||||
|
||||
```
|
||||
[SystemMessage(content='You are a helpful assistant that translates English to French.', additional_kwargs={}),
|
||||
HumanMessage(content='I love programming.', additional_kwargs={})]
|
||||
AIMessage(content='I absolutely adore indulging in delicious treats!', additional_kwargs={}, example=False)
|
||||
```
|
||||
|
||||
</CodeOutputBlock>
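(An illustrative aside: `format_prompt` returns a `PromptValue`, so the same formatted prompt can feed either kind of model.)

```python
prompt_value = chat_prompt.format_prompt(
    input_language="English", output_language="French", text="I love programming."
)
prompt_value.to_string()    # one flattened string, for a plain LLM
prompt_value.to_messages()  # a list of chat messages, for a chat model
```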
|
||||
|
||||
This provides you with a lot of flexibility in how you construct your chat prompts.
|
||||
|
||||
|
||||
@@ -92,15 +92,24 @@ spell_fix:
|
||||
######################
|
||||
|
||||
help:
|
||||
@echo '----'
|
||||
@echo 'coverage - run unit tests and generate coverage report'
|
||||
@echo '===================='
|
||||
@echo '-- DOCUMENTATION --'
|
||||
@echo 'clean - run docs_clean and api_docs_clean'
|
||||
@echo 'docs_build - build the documentation'
|
||||
@echo 'docs_clean - clean the documentation build artifacts'
|
||||
@echo 'docs_linkcheck - run linkchecker on the documentation'
|
||||
@echo 'api_docs_build - build the API Reference documentation'
|
||||
@echo 'api_docs_clean - clean the API Reference documentation build artifacts'
|
||||
@echo 'api_docs_linkcheck - run linkchecker on the API Reference documentation'
|
||||
@echo '-- LINTING --'
|
||||
@echo 'format - run code formatters'
|
||||
@echo 'lint - run linters'
|
||||
@echo 'spell_check - run codespell on the project'
|
||||
@echo 'spell_fix - run codespell on the project and fix the errors'
|
||||
@echo '-- TESTS --'
|
||||
@echo 'coverage - run unit tests and generate coverage report'
|
||||
@echo 'test - run unit tests'
|
||||
@echo 'tests - run unit tests'
|
||||
@echo 'tests - run unit tests (alias for "make test")'
|
||||
@echo 'test TEST_FILE=<test_file> - run all tests in file'
|
||||
@echo 'extended_tests - run only extended unit tests'
|
||||
@echo 'test_watch - run unit tests in watch mode'
|
||||
|
||||
@@ -897,7 +897,10 @@ s
|
||||
else:
|
||||
tool_run_kwargs = self.agent.tool_run_logging_kwargs()
|
||||
observation = InvalidTool().run(
|
||||
agent_action.tool,
|
||||
{
|
||||
"requested_tool_name": agent_action.tool,
|
||||
"available_tool_names": list(name_to_tool_map.keys()),
|
||||
},
|
||||
verbose=self.verbose,
|
||||
color=None,
|
||||
callbacks=run_manager.get_child() if run_manager else None,
|
||||
@@ -992,7 +995,10 @@ s
|
||||
else:
|
||||
tool_run_kwargs = self.agent.tool_run_logging_kwargs()
|
||||
observation = await InvalidTool().arun(
|
||||
agent_action.tool,
|
||||
{
|
||||
"requested_tool_name": agent_action.tool,
|
||||
"available_tool_names": list(name_to_tool_map.keys()),
|
||||
},
|
||||
verbose=self.verbose,
|
||||
color=None,
|
||||
callbacks=run_manager.get_child() if run_manager else None,
|
||||
|
||||
@@ -17,7 +17,7 @@ from langchain.agents.agent_toolkits.gmail.toolkit import GmailToolkit
|
||||
from langchain.agents.agent_toolkits.jira.toolkit import JiraToolkit
|
||||
from langchain.agents.agent_toolkits.json.base import create_json_agent
|
||||
from langchain.agents.agent_toolkits.json.toolkit import JsonToolkit
|
||||
from langchain.agents.agent_toolkits.multion.base import create_multion_agent
|
||||
from langchain.agents.agent_toolkits.multion.toolkit import MultionToolkit
|
||||
from langchain.agents.agent_toolkits.nla.toolkit import NLAToolkit
|
||||
from langchain.agents.agent_toolkits.office365.toolkit import O365Toolkit
|
||||
from langchain.agents.agent_toolkits.openapi.base import create_openapi_agent
|
||||
@@ -52,6 +52,7 @@ __all__ = [
|
||||
"GmailToolkit",
|
||||
"JiraToolkit",
|
||||
"JsonToolkit",
|
||||
"MultionToolkit",
|
||||
"NLAToolkit",
|
||||
"O365Toolkit",
|
||||
"OpenAPIToolkit",
|
||||
@@ -65,7 +66,6 @@ __all__ = [
|
||||
"ZapierToolkit",
|
||||
"create_csv_agent",
|
||||
"create_json_agent",
|
||||
"create_multion_agent",
|
||||
"create_openapi_agent",
|
||||
"create_pandas_dataframe_agent",
|
||||
"create_pbi_agent",
|
||||
|
||||
@@ -1,58 +0,0 @@
|
||||
"""MultiOn agent."""
|
||||
|
||||
from typing import Any, Dict, Optional
|
||||
|
||||
from langchain.agents.agent import AgentExecutor, BaseSingleActionAgent
|
||||
from langchain.agents.agent_toolkits.python.prompt import PREFIX
|
||||
from langchain.agents.mrkl.base import ZeroShotAgent
|
||||
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
|
||||
from langchain.agents.types import AgentType
|
||||
from langchain.base_language import BaseLanguageModel
|
||||
from langchain.callbacks.base import BaseCallbackManager
|
||||
from langchain.chains.llm import LLMChain
|
||||
from langchain.schema import SystemMessage
|
||||
from langchain.tools.multion.tool import MultionClientTool
|
||||
|
||||
|
||||
def create_multion_agent(
|
||||
llm: BaseLanguageModel,
|
||||
tool: MultionClientTool,
|
||||
agent_type: AgentType = AgentType.ZERO_SHOT_REACT_DESCRIPTION,
|
||||
callback_manager: Optional[BaseCallbackManager] = None,
|
||||
verbose: bool = False,
|
||||
prefix: str = PREFIX,
|
||||
agent_executor_kwargs: Optional[Dict[str, Any]] = None,
|
||||
**kwargs: Dict[str, Any],
|
||||
) -> AgentExecutor:
|
||||
"""Construct a multion agent from an LLM and tool."""
|
||||
tools = [tool]
|
||||
agent: BaseSingleActionAgent
|
||||
|
||||
if agent_type == AgentType.ZERO_SHOT_REACT_DESCRIPTION:
|
||||
prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix)
|
||||
llm_chain = LLMChain(
|
||||
llm=llm,
|
||||
prompt=prompt,
|
||||
callback_manager=callback_manager,
|
||||
)
|
||||
tool_names = [tool.name for tool in tools]
|
||||
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
|
||||
elif agent_type == AgentType.OPENAI_FUNCTIONS:
|
||||
system_message = SystemMessage(content=prefix)
|
||||
_prompt = OpenAIFunctionsAgent.create_prompt(system_message=system_message)
|
||||
agent = OpenAIFunctionsAgent(
|
||||
llm=llm,
|
||||
prompt=_prompt,
|
||||
tools=[tool],
|
||||
callback_manager=callback_manager,
|
||||
**kwargs,
|
||||
)
|
||||
else:
|
||||
raise ValueError(f"Agent type {agent_type} not supported at the moment.")
|
||||
return AgentExecutor.from_agent_and_tools(
|
||||
agent=agent,
|
||||
tools=tools,
|
||||
callback_manager=callback_manager,
|
||||
verbose=verbose,
|
||||
**(agent_executor_kwargs or {}),
|
||||
)
|
||||
@@ -0,0 +1,22 @@
|
||||
"""MultiOn agent."""
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List
|
||||
|
||||
from langchain.agents.agent_toolkits.base import BaseToolkit
|
||||
from langchain.tools import BaseTool
|
||||
from langchain.tools.multion.create_session import MultionCreateSession
|
||||
from langchain.tools.multion.update_session import MultionUpdateSession
|
||||
|
||||
|
||||
class MultionToolkit(BaseToolkit):
|
||||
"""Toolkit for interacting with the Browser Agent"""
|
||||
|
||||
class Config:
|
||||
"""Pydantic config."""
|
||||
|
||||
arbitrary_types_allowed = True
|
||||
|
||||
def get_tools(self) -> List[BaseTool]:
|
||||
"""Get the tools in the toolkit."""
|
||||
return [MultionCreateSession(), MultionUpdateSession()]
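# Usage sketch (illustrative, not part of the diff; assumes MultiOn
# credentials are configured in the environment):
toolkit = MultionToolkit()
tools = toolkit.get_tools()
print([tool.name for tool in tools])  # the create-session and update-session tools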
|
||||
@@ -57,10 +57,6 @@ from langchain.utilities.wikipedia import WikipediaAPIWrapper
|
||||
from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper
|
||||
from langchain.utilities.openweathermap import OpenWeatherMapAPIWrapper
|
||||
from langchain.utilities.dataforseo_api_search import DataForSeoAPIWrapper
|
||||
from langchain.tools.google_drive.tool import (
|
||||
GoogleDriveSearchTool,
|
||||
GoogleDriveAPIWrapper,
|
||||
)
|
||||
|
||||
|
||||
def _get_python_repl() -> BaseTool:
|
||||
@@ -184,10 +180,6 @@ def _get_wolfram_alpha(**kwargs: Any) -> BaseTool:
|
||||
return WolframAlphaQueryRun(api_wrapper=WolframAlphaAPIWrapper(**kwargs))
|
||||
|
||||
|
||||
def _get_google_drive_search(**kwargs: Any) -> BaseTool:
|
||||
return GoogleDriveSearchTool(api_wrapper=GoogleDriveAPIWrapper(**kwargs))
|
||||
|
||||
|
||||
def _get_google_search(**kwargs: Any) -> BaseTool:
|
||||
return GoogleSearchRun(api_wrapper=GoogleSearchAPIWrapper(**kwargs))
|
||||
|
||||
@@ -295,15 +287,6 @@ _EXTRA_LLM_TOOLS: Dict[
|
||||
|
||||
_EXTRA_OPTIONAL_TOOLS: Dict[str, Tuple[Callable[[KwArg(Any)], BaseTool], List[str]]] = {
|
||||
"wolfram-alpha": (_get_wolfram_alpha, ["wolfram_alpha_appid"]),
|
||||
"google-drive-search": (
|
||||
_get_google_drive_search,
|
||||
[
|
||||
"gdrive_api_file",
|
||||
"folder_id",
|
||||
"mime_type",
|
||||
"template",
|
||||
],
|
||||
),
|
||||
"google-search": (_get_google_search, ["google_api_key", "google_cse_id"]),
|
||||
"google-search-results-json": (
|
||||
_get_google_search_results_json,
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
"""Interface for tools."""
|
||||
from typing import Optional
|
||||
from typing import List, Optional
|
||||
|
||||
from langchain.callbacks.manager import (
|
||||
AsyncCallbackManagerForToolRun,
|
||||
@@ -12,23 +12,33 @@ class InvalidTool(BaseTool):
|
||||
"""Tool that is run when invalid tool name is encountered by agent."""
|
||||
|
||||
name = "invalid_tool"
|
||||
"""Name of the tool."""
|
||||
description = "Called when tool name is invalid."
|
||||
"""Description of the tool."""
|
||||
description = "Called when tool name is invalid. Suggests valid tool names."
|
||||
|
||||
def _run(
|
||||
self, tool_name: str, run_manager: Optional[CallbackManagerForToolRun] = None
|
||||
self,
|
||||
requested_tool_name: str,
|
||||
available_tool_names: List[str],
|
||||
run_manager: Optional[CallbackManagerForToolRun] = None,
|
||||
) -> str:
|
||||
"""Use the tool."""
|
||||
return f"{tool_name} is not a valid tool, try another one."
|
||||
available_tool_names_str = ", ".join([tool for tool in available_tool_names])
|
||||
return (
|
||||
f"{requested_tool_name} is not a valid tool, "
|
||||
f"try one of [{available_tool_names_str}]."
|
||||
)
|
||||
|
||||
async def _arun(
|
||||
self,
|
||||
tool_name: str,
|
||||
requested_tool_name: str,
|
||||
available_tool_names: List[str],
|
||||
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
|
||||
) -> str:
|
||||
"""Use the tool asynchronously."""
|
||||
return f"{tool_name} is not a valid tool, try another one."
|
||||
available_tool_names_str = ", ".join([tool for tool in available_tool_names])
|
||||
return (
|
||||
f"{requested_tool_name} is not a valid tool, "
|
||||
f"try one of [{available_tool_names_str}]."
|
||||
)
|
||||
|
||||
|
||||
__all__ = ["InvalidTool", "BaseTool", "tool", "Tool"]
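# Quick check of the new error message (illustrative sketch, not part of the
# diff; the misspelled tool name is hypothetical):
from langchain.agents.tools import InvalidTool

print(
    InvalidTool().run(
        {
            "requested_tool_name": "serach",
            "available_tool_names": ["search", "calculator"],
        }
    )
)
# -> "serach is not a valid tool, try one of [search, calculator]."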
|
||||
|
||||
@@ -730,7 +730,7 @@ class CallbackManagerForChainRun(ParentRunManager, ChainManagerMixin):
|
||||
|
||||
def on_chain_error(
|
||||
self,
|
||||
error: Union[Exception, KeyboardInterrupt],
|
||||
error: BaseException,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Run when chain errors.
|
||||
@@ -812,7 +812,7 @@ class AsyncCallbackManagerForChainRun(AsyncParentRunManager, ChainManagerMixin):
|
||||
|
||||
async def on_chain_error(
|
||||
self,
|
||||
error: Union[Exception, KeyboardInterrupt],
|
||||
error: BaseException,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Run when chain errors.
|
||||
|
||||
@@ -71,7 +71,7 @@ class EvaluatorCallbackHandler(BaseTracer):
|
||||
client: Optional[Client] = None,
|
||||
example_id: Optional[Union[UUID, str]] = None,
|
||||
skip_unfinished: bool = True,
|
||||
project_name: Optional[str] = None,
|
||||
project_name: Optional[str] = "evaluators",
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
super().__init__(**kwargs)
|
||||
|
||||
@@ -114,17 +114,55 @@ def _load_map_reduce_documents_chain(
|
||||
if not isinstance(llm_chain, LLMChain):
|
||||
raise ValueError(f"Expected LLMChain, got {llm_chain}")
|
||||
|
||||
if "combine_document_chain" in config:
|
||||
if "reduce_documents_chain" in config:
|
||||
reduce_documents_chain = load_chain_from_config(
|
||||
config.pop("reduce_documents_chain")
|
||||
)
|
||||
elif "reduce_documents_chain_path" in config:
|
||||
reduce_documents_chain = load_chain(config.pop("reduce_documents_chain_path"))
|
||||
else:
|
||||
reduce_documents_chain = _load_reduce_documents_chain(config)
|
||||
|
||||
return MapReduceDocumentsChain(
|
||||
llm_chain=llm_chain,
|
||||
reduce_documents_chain=reduce_documents_chain,
|
||||
**config,
|
||||
)
|
||||
|
||||
|
||||
def _load_reduce_documents_chain(config: dict, **kwargs: Any) -> ReduceDocumentsChain:
|
||||
combine_documents_chain = None
|
||||
collapse_documents_chain = None
|
||||
|
||||
if "combine_documents_chain" in config:
|
||||
combine_document_chain_config = config.pop("combine_documents_chain")
|
||||
combine_documents_chain = load_chain_from_config(combine_document_chain_config)
|
||||
elif "combine_document_chain" in config:
|
||||
combine_document_chain_config = config.pop("combine_document_chain")
|
||||
combine_documents_chain = load_chain_from_config(combine_document_chain_config)
|
||||
elif "combine_documents_chain_path" in config:
|
||||
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
|
||||
elif "combine_document_chain_path" in config:
|
||||
combine_documents_chain = load_chain(config.pop("combine_document_chain_path"))
|
||||
else:
|
||||
raise ValueError(
|
||||
"One of `combine_document_chain` or "
|
||||
"`combine_document_chain_path` must be present."
|
||||
"One of `combine_documents_chain` or "
|
||||
"`combine_documents_chain_path` must be present."
|
||||
)
|
||||
if "collapse_document_chain" in config:
|
||||
|
||||
if "collapse_documents_chain" in config:
|
||||
collapse_document_chain_config = config.pop("collapse_documents_chain")
|
||||
if collapse_document_chain_config is None:
|
||||
collapse_documents_chain = None
|
||||
else:
|
||||
collapse_documents_chain = load_chain_from_config(
|
||||
collapse_document_chain_config
|
||||
)
|
||||
elif "collapse_documents_chain_path" in config:
|
||||
collapse_documents_chain = load_chain(
|
||||
config.pop("collapse_documents_chain_path")
|
||||
)
|
||||
elif "collapse_document_chain" in config:
|
||||
collapse_document_chain_config = config.pop("collapse_document_chain")
|
||||
if collapse_document_chain_config is None:
|
||||
collapse_documents_chain = None
|
||||
@@ -136,15 +174,10 @@ def _load_map_reduce_documents_chain(
|
||||
collapse_documents_chain = load_chain(
|
||||
config.pop("collapse_document_chain_path")
|
||||
)
|
||||
else:
|
||||
collapse_documents_chain = None
|
||||
reduce_documents_chain = ReduceDocumentsChain(
|
||||
|
||||
return ReduceDocumentsChain(
|
||||
combine_documents_chain=combine_documents_chain,
|
||||
collapse_documents_chain=collapse_documents_chain,
|
||||
)
|
||||
return MapReduceDocumentsChain(
|
||||
llm_chain=llm_chain,
|
||||
reduce_documents_chain=reduce_documents_chain,
|
||||
**config,
|
||||
)
|
||||
|
||||
@@ -497,6 +530,7 @@ type_to_loader_dict = {
|
||||
"qa_with_sources_chain": _load_qa_with_sources_chain,
|
||||
"stuff_documents_chain": _load_stuff_documents_chain,
|
||||
"map_reduce_documents_chain": _load_map_reduce_documents_chain,
|
||||
"reduce_documents_chain": _load_reduce_documents_chain,
|
||||
"map_rerank_documents_chain": _load_map_rerank_documents_chain,
|
||||
"refine_documents_chain": _load_refine_documents_chain,
|
||||
"sql_database_chain": _load_sql_database_chain,
|
||||
|
||||
@@ -1,7 +1,16 @@
|
||||
"""Methods for creating chains that use OpenAI function-calling APIs."""
|
||||
import inspect
|
||||
import re
|
||||
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Type, Union
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
Dict,
|
||||
List,
|
||||
Optional,
|
||||
Sequence,
|
||||
Tuple,
|
||||
Type,
|
||||
Union,
|
||||
)
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
@@ -25,8 +34,7 @@ PYTHON_TO_JSON_TYPES = {
|
||||
|
||||
def _get_python_function_name(function: Callable) -> str:
|
||||
"""Get the name of a Python function."""
|
||||
source = inspect.getsource(function)
|
||||
return re.search(r"^def (.*)\(", source).groups()[0] # type: ignore
|
||||
return function.__name__
|
||||
|
||||
|
||||
def _parse_python_function_docstring(function: Callable) -> Tuple[str, dict]:
|
||||
@@ -94,10 +102,16 @@ def _get_python_function_required_args(function: Callable) -> List[str]:
|
||||
spec = inspect.getfullargspec(function)
|
||||
required = spec.args[: -len(spec.defaults)] if spec.defaults else spec.args
|
||||
required += [k for k in spec.kwonlyargs if k not in (spec.kwonlydefaults or {})]
|
||||
|
||||
is_class = type(function) is type
|
||||
if is_class and required[0] == "self":
|
||||
required = required[1:]
|
||||
return required
|
||||
|
||||
|
||||
def convert_python_function_to_openai_function(function: Callable) -> Dict[str, Any]:
|
||||
def convert_python_function_to_openai_function(
|
||||
function: Callable,
|
||||
) -> Dict[str, Any]:
|
||||
"""Convert a Python function to an OpenAI function-calling API compatible dict.
|
||||
|
||||
Assumes the Python function has type hints and a docstring with a description. If
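# An illustrative round trip through the helper above (the `multiply`
# function is hypothetical):
def multiply(a: int, b: int) -> int:
    """Multiply two integers.

    Args:
        a: the first factor
        b: the second factor
    """
    return a * b


# The name now comes from function.__name__ instead of regex-parsing the
# source, so the conversion also works for callables without inspectable
# source code.
convert_python_function_to_openai_function(multiply)
# -> roughly {"name": "multiply", "description": "Multiply two integers.",
#    "parameters": {...a JSON schema describing a and b...}}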
|
||||
|
||||
@@ -83,6 +83,7 @@ def _load_stuff_chain(
|
||||
document_variable_name=document_variable_name,
|
||||
verbose=verbose,
|
||||
callback_manager=callback_manager,
|
||||
callbacks=callbacks,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
@@ -209,6 +210,7 @@ def _load_refine_chain(
|
||||
initial_response_name=initial_response_name,
|
||||
verbose=verbose,
|
||||
callback_manager=callback_manager,
|
||||
callbacks=callbacks,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@ from pydantic import Extra, Field, root_validator
|
||||
from langchain.callbacks.manager import (
|
||||
AsyncCallbackManagerForChainRun,
|
||||
CallbackManagerForChainRun,
|
||||
Callbacks,
|
||||
)
|
||||
from langchain.chains.base import Chain
|
||||
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
|
||||
@@ -65,11 +66,12 @@ class BaseRetrievalQA(Chain):
|
||||
cls,
|
||||
llm: BaseLanguageModel,
|
||||
prompt: Optional[PromptTemplate] = None,
|
||||
callbacks: Callbacks = None,
|
||||
**kwargs: Any,
|
||||
) -> BaseRetrievalQA:
|
||||
"""Initialize from LLM."""
|
||||
_prompt = prompt or PROMPT_SELECTOR.get_prompt(llm)
|
||||
llm_chain = LLMChain(llm=llm, prompt=_prompt)
|
||||
llm_chain = LLMChain(llm=llm, prompt=_prompt, callbacks=callbacks)
|
||||
document_prompt = PromptTemplate(
|
||||
input_variables=["page_content"], template="Context:\n{page_content}"
|
||||
)
|
||||
@@ -77,9 +79,14 @@ class BaseRetrievalQA(Chain):
|
||||
llm_chain=llm_chain,
|
||||
document_variable_name="context",
|
||||
document_prompt=document_prompt,
|
||||
callbacks=callbacks,
|
||||
)
|
||||
|
||||
return cls(combine_documents_chain=combine_documents_chain, **kwargs)
|
||||
return cls(
|
||||
combine_documents_chain=combine_documents_chain,
|
||||
callbacks=callbacks,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def from_chain_type(
|
||||
|
||||
@@ -27,5 +27,5 @@ modifications are needed.
|
||||
<< INPUT >>
|
||||
{{input}}
|
||||
|
||||
<< OUTPUT >>
|
||||
<< OUTPUT (must include ```json at the start of the response) >>
|
||||
"""
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
"""Load summarizing chains."""
|
||||
from typing import Any, Mapping, Optional, Protocol
|
||||
|
||||
from langchain.callbacks.manager import Callbacks
|
||||
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
|
||||
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
|
||||
from langchain.chains.combine_documents.reduce import ReduceDocumentsChain
|
||||
@@ -49,16 +50,22 @@ def _load_map_reduce_chain(
|
||||
collapse_llm: Optional[BaseLanguageModel] = None,
|
||||
verbose: Optional[bool] = None,
|
||||
token_max: int = 3000,
|
||||
callbacks: Callbacks = None,
|
||||
**kwargs: Any,
|
||||
) -> MapReduceDocumentsChain:
|
||||
map_chain = LLMChain(llm=llm, prompt=map_prompt, verbose=verbose)
|
||||
map_chain = LLMChain(
|
||||
llm=llm, prompt=map_prompt, verbose=verbose, callbacks=callbacks
|
||||
)
|
||||
_reduce_llm = reduce_llm or llm
|
||||
reduce_chain = LLMChain(llm=_reduce_llm, prompt=combine_prompt, verbose=verbose)
|
||||
reduce_chain = LLMChain(
|
||||
llm=_reduce_llm, prompt=combine_prompt, verbose=verbose, callbacks=callbacks
|
||||
)
|
||||
# TODO: document prompt
|
||||
combine_documents_chain = StuffDocumentsChain(
|
||||
llm_chain=reduce_chain,
|
||||
document_variable_name=combine_document_variable_name,
|
||||
verbose=verbose,
|
||||
callbacks=callbacks,
|
||||
)
|
||||
if collapse_prompt is None:
|
||||
collapse_chain = None
|
||||
@@ -74,6 +81,7 @@ def _load_map_reduce_chain(
|
||||
llm=_collapse_llm,
|
||||
prompt=collapse_prompt,
|
||||
verbose=verbose,
|
||||
callbacks=callbacks,
|
||||
),
|
||||
document_variable_name=combine_document_variable_name,
|
||||
)
|
||||
@@ -82,12 +90,14 @@ def _load_map_reduce_chain(
|
||||
collapse_documents_chain=collapse_chain,
|
||||
token_max=token_max,
|
||||
verbose=verbose,
|
||||
callbacks=callbacks,
|
||||
)
|
||||
return MapReduceDocumentsChain(
|
||||
llm_chain=map_chain,
|
||||
reduce_documents_chain=reduce_documents_chain,
|
||||
document_variable_name=map_reduce_document_variable_name,
|
||||
verbose=verbose,
|
||||
callbacks=callbacks,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
|
||||
@@ -45,7 +45,7 @@ class AzureChatOpenAI(ChatOpenAI):
|
||||
"""
|
||||
|
||||
deployment_name: str = ""
|
||||
openai_api_type: str = "azure"
|
||||
openai_api_type: str = ""
|
||||
openai_api_base: str = ""
|
||||
openai_api_version: str = ""
|
||||
openai_api_key: str = ""
|
||||
@@ -71,9 +71,7 @@ class AzureChatOpenAI(ChatOpenAI):
|
||||
"OPENAI_API_VERSION",
|
||||
)
|
||||
values["openai_api_type"] = get_from_dict_or_env(
|
||||
values,
|
||||
"openai_api_type",
|
||||
"OPENAI_API_TYPE",
|
||||
values, "openai_api_type", "OPENAI_API_TYPE", default="azure"
|
||||
)
|
||||
values["openai_organization"] = get_from_dict_or_env(
|
||||
values,
|
||||
|
||||
@@ -381,9 +381,10 @@ class ChatOpenAI(BaseChatModel):
|
||||
messages: List[BaseMessage],
|
||||
stop: Optional[List[str]] = None,
|
||||
run_manager: Optional[CallbackManagerForLLMRun] = None,
|
||||
stream: Optional[bool] = None,
|
||||
**kwargs: Any,
|
||||
) -> ChatResult:
|
||||
if self.streaming:
|
||||
if stream if stream is not None else self.streaming:
|
||||
generation: Optional[ChatGenerationChunk] = None
|
||||
for chunk in self._stream(
|
||||
messages=messages, stop=stop, run_manager=run_manager, **kwargs
|
||||
@@ -454,9 +455,10 @@ class ChatOpenAI(BaseChatModel):
|
||||
messages: List[BaseMessage],
|
||||
stop: Optional[List[str]] = None,
|
||||
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
|
||||
stream: Optional[bool] = None,
|
||||
**kwargs: Any,
|
||||
) -> ChatResult:
|
||||
if self.streaming:
|
||||
if stream if stream is not None else self.streaming:
|
||||
generation: Optional[ChatGenerationChunk] = None
|
||||
async for chunk in self._astream(
|
||||
messages=messages, stop=stop, run_manager=run_manager, **kwargs
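# The new `stream` argument overrides the instance-level `streaming` flag on
# a per-call basis; None falls back to the old behavior. An explanatory
# sketch of the selection logic (not part of the diff):
def effective_streaming(stream, default):
    # a per-call override wins; None defers to the instance default
    return stream if stream is not None else default

assert effective_streaming(None, True) is True
assert effective_streaming(False, True) is False
assert effective_streaming(True, False) is True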
|
||||
|
||||
@@ -43,13 +43,16 @@ class PromptLayerChatOpenAI(ChatOpenAI):
|
||||
messages: List[BaseMessage],
|
||||
stop: Optional[List[str]] = None,
|
||||
run_manager: Optional[CallbackManagerForLLMRun] = None,
|
||||
stream: Optional[bool] = None,
|
||||
**kwargs: Any
|
||||
) -> ChatResult:
|
||||
"""Call ChatOpenAI generate and then call PromptLayer API to log the request."""
|
||||
from promptlayer.utils import get_api_key, promptlayer_api_request
|
||||
|
||||
request_start_time = datetime.datetime.now().timestamp()
|
||||
generated_responses = super()._generate(messages, stop, run_manager, **kwargs)
|
||||
generated_responses = super()._generate(
|
||||
messages, stop, run_manager, stream=stream, **kwargs
|
||||
)
|
||||
request_end_time = datetime.datetime.now().timestamp()
|
||||
message_dicts, params = super()._create_message_dicts(messages, stop)
|
||||
for i, generation in enumerate(generated_responses.generations):
|
||||
@@ -82,13 +85,16 @@ class PromptLayerChatOpenAI(ChatOpenAI):
|
||||
messages: List[BaseMessage],
|
||||
stop: Optional[List[str]] = None,
|
||||
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
|
||||
stream: Optional[bool] = None,
|
||||
**kwargs: Any
|
||||
) -> ChatResult:
|
||||
"""Call ChatOpenAI agenerate and then call PromptLayer to log."""
|
||||
from promptlayer.utils import get_api_key, promptlayer_api_request_async
|
||||
|
||||
request_start_time = datetime.datetime.now().timestamp()
|
||||
generated_responses = await super()._agenerate(messages, stop, run_manager)
|
||||
generated_responses = await super()._agenerate(
|
||||
messages, stop, run_manager, stream=stream, **kwargs
|
||||
)
|
||||
request_end_time = datetime.datetime.now().timestamp()
|
||||
message_dicts, params = super()._create_message_dicts(messages, stop)
|
||||
for i, generation in enumerate(generated_responses.generations):
|
||||
|
||||
@@ -111,7 +111,7 @@ class ChatVertexAI(_VertexAICommon, BaseChatModel):
|
||||
|
||||
values["client"] = ChatModel.from_pretrained(values["model_name"])
|
||||
except ImportError:
|
||||
raise_vertex_import_error()
|
||||
raise_vertex_import_error(minimum_expected_version="1.29.0")
|
||||
return values
|
||||
|
||||
def _generate(
|
||||
@@ -155,7 +155,7 @@ class ChatVertexAI(_VertexAICommon, BaseChatModel):
|
||||
context=context, message_history=history.history, **params
|
||||
)
|
||||
else:
|
||||
chat = self.client.start_chat(**params)
|
||||
chat = self.client.start_chat(message_history=history.history, **params)
|
||||
response = chat.send_message(question.content)
|
||||
text = self._enforce_stop_words(response.text, stop)
|
||||
return ChatResult(generations=[ChatGeneration(message=AIMessage(content=text))])
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
"""Interface to access to place that stores documents."""
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Dict, Union
|
||||
from typing import Dict, List, Union
|
||||
|
||||
from langchain.docstore.document import Document
|
||||
|
||||
@@ -16,6 +16,10 @@ class Docstore(ABC):
|
||||
If page does not exist, return similar entries.
|
||||
"""
|
||||
|
||||
def delete(self, ids: List) -> None:
|
||||
"""Deleting IDs from in memory dictionary."""
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class AddableMixin(ABC):
|
||||
"""Mixin class that supports adding texts."""
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
"""Simple in memory docstore in the form of a dict."""
|
||||
from typing import Dict, Optional, Union
|
||||
from typing import Dict, List, Optional, Union
|
||||
|
||||
from langchain.docstore.base import AddableMixin, Docstore
|
||||
from langchain.docstore.document import Document
|
||||
@@ -26,6 +26,14 @@ class InMemoryDocstore(Docstore, AddableMixin):
|
||||
raise ValueError(f"Tried to add ids that already exist: {overlapping}")
|
||||
self._dict = {**self._dict, **texts}
|
||||
|
||||
def delete(self, ids: List) -> None:
|
||||
"""Deleting IDs from in memory dictionary."""
|
||||
overlapping = set(ids).intersection(self._dict)
|
||||
if not overlapping:
|
||||
raise ValueError(f"Tried to delete ids that does not exist: {ids}")
|
||||
for _id in ids:
|
||||
self._dict.pop(_id)
|
||||
|
||||
def search(self, search: str) -> Union[str, Document]:
|
||||
"""Search via direct lookup.
|
||||
|
||||
|
||||
@@ -74,7 +74,7 @@ from langchain.document_loaders.geodataframe import GeoDataFrameLoader
|
||||
from langchain.document_loaders.git import GitLoader
|
||||
from langchain.document_loaders.gitbook import GitbookLoader
|
||||
from langchain.document_loaders.github import GitHubIssuesLoader
|
||||
from langchain.document_loaders.google_drive import GoogleDriveLoader
|
||||
from langchain.document_loaders.googledrive import GoogleDriveLoader
|
||||
from langchain.document_loaders.gutenberg import GutenbergLoader
|
||||
from langchain.document_loaders.hn import HNLoader
|
||||
from langchain.document_loaders.html import UnstructuredHTMLLoader
|
||||
@@ -108,6 +108,7 @@ from langchain.document_loaders.onedrive_file import OneDriveFileLoader
|
||||
from langchain.document_loaders.open_city_data import OpenCityDataLoader
|
||||
from langchain.document_loaders.org_mode import UnstructuredOrgModeLoader
|
||||
from langchain.document_loaders.pdf import (
|
||||
AmazonTextractPDFLoader,
|
||||
MathpixPDFLoader,
|
||||
OnlinePDFLoader,
|
||||
PDFMinerLoader,
|
||||
@@ -128,6 +129,7 @@ from langchain.document_loaders.recursive_url_loader import RecursiveUrlLoader
|
||||
from langchain.document_loaders.reddit import RedditPostsLoader
|
||||
from langchain.document_loaders.roam import RoamLoader
|
||||
from langchain.document_loaders.rocksetdb import RocksetLoader
|
||||
from langchain.document_loaders.rss import RSSFeedLoader
|
||||
from langchain.document_loaders.rst import UnstructuredRSTLoader
|
||||
from langchain.document_loaders.rtf import UnstructuredRTFLoader
|
||||
from langchain.document_loaders.s3_directory import S3DirectoryLoader
|
||||
@@ -280,6 +282,7 @@ __all__ = [
|
||||
"RedditPostsLoader",
|
||||
"RoamLoader",
|
||||
"RocksetLoader",
|
||||
"RSSFeedLoader",
|
||||
"S3DirectoryLoader",
|
||||
"S3FileLoader",
|
||||
"SRTLoader",
|
||||
@@ -328,4 +331,5 @@ __all__ = [
|
||||
"YoutubeAudioLoader",
|
||||
"YoutubeLoader",
|
||||
"ConcurrentLoader",
|
||||
"AmazonTextractPDFLoader",
|
||||
]
|
||||
|
||||
@@ -54,12 +54,14 @@ class BiliBiliLoader(BaseLoader):
|
||||
|
||||
video_info = sync(v.get_info())
|
||||
video_info.update({"url": url})
|
||||
sub = sync(v.get_subtitle(video_info["cid"]))
|
||||
|
||||
# Get subtitle url
|
||||
subtitle = video_info.pop("subtitle")
|
||||
sub_list = subtitle["list"]
|
||||
sub_list = sub["subtitles"]
|
||||
if sub_list:
|
||||
sub_url = sub_list[0]["subtitle_url"]
|
||||
if not sub_url.startswith("http"):
|
||||
sub_url = "https:" + sub_url
|
||||
result = requests.get(sub_url)
|
||||
raw_sub_titles = json.loads(result.content)["body"]
|
||||
raw_transcript = " ".join([c["content"] for c in raw_sub_titles])
|
||||
|
||||
@@ -1,216 +0,0 @@
"""Loads data from Google Drive.

Prerequisites:

1. Create a Google Cloud project
2. Enable the Google Drive API:
   https://console.cloud.google.com/flows/enableapi?apiid=drive.googleapis.com
3. Authorize credentials for desktop app:
   https://developers.google.com/drive/api/quickstart/python#authorize_credentials_for_a_desktop_application
4. For service accounts visit
   https://cloud.google.com/iam/docs/service-accounts-create
"""  # noqa: E501

import itertools
import logging
import os
import warnings
from pathlib import Path
from typing import (
    Any,
    Dict,
    Iterator,
    List,
    Optional,
    Sequence,
)

from pydantic.class_validators import root_validator

from langchain.base_language import BaseLanguageModel
from langchain.chains.summarize import load_summarize_chain
from langchain.document_loaders.base import BaseLoader
from langchain.prompts import PromptTemplate
from langchain.schema import Document
from langchain.utilities.google_drive import (
    GoogleDriveUtilities,
    get_template,
)

logger = logging.getLogger(__name__)


class GoogleDriveLoader(BaseLoader, GoogleDriveUtilities):
    """Loads data from Google Drive."""

    document_ids: Optional[Sequence[str]] = None
    """A list of ids of google drive documents to load."""

    file_ids: Optional[Sequence[str]] = None
    """A list of ids of google drive files to load."""

    @root_validator(pre=True)
    def validate_older_api_and_new_environment_variable(
        cls, v: Dict[str, Any]
    ) -> Dict[str, Any]:
        service_account_key = v.get("service_account_key")
        credentials_path = v.get("credentials_path")
        api_file = v.get("gdrive_api_file")

        if service_account_key:
            warnings.warn(
                "service_account_key was deprecated. Use GOOGLE_ACCOUNT_FILE env "
                "variable.",
                DeprecationWarning,
            )
        if credentials_path:
            warnings.warn(
                "credentials_path was deprecated. Use GOOGLE_ACCOUNT_FILE env "
                "variable.",
                DeprecationWarning,
            )
        if service_account_key and credentials_path:
            raise ValueError("Select only service_account_key or credentials_path")

        folder_id = v.get("folder_id")
        document_ids = v.get("document_ids")
        file_ids = v.get("file_ids")

        if folder_id and (document_ids or file_ids):
            raise ValueError(
                "Cannot specify both folder_id and document_ids nor "
                "folder_id and file_ids"
            )

        # To be compatible with the old approach
        if not api_file:
            api_file = (
                Path(os.environ["GOOGLE_ACCOUNT_FILE"])
                if "GOOGLE_ACCOUNT_FILE" in os.environ
                else None
            )
        # Deprecated: To be compatible with the old approach of authentication
        if service_account_key:
            api_file = service_account_key
        elif credentials_path:
            api_file = credentials_path
        elif not api_file:
            api_file = Path.home() / ".credentials" / "keys.json"
        v["gdrive_api_file"] = api_file

        if not v.get("template"):
            if folder_id:
                template = get_template("gdrive-all-in-folder")
            elif "document_ids" in v or "file_ids" in v:
                template = PromptTemplate(input_variables=[], template="")
            else:
                raise ValueError("Use a template")
            v["template"] = template
        return v

    def lazy_load(self) -> Iterator[Document]:
        ids = self.document_ids or self.file_ids
        if ids:
            yield from (self.load_document_from_id(_id) for _id in ids)
        else:
            yield from self.lazy_get_relevant_documents()

    def load(self) -> List[Document]:
        return list(self.lazy_load())


def lazy_update_description_with_summary(
    loader: GoogleDriveLoader,
    llm: BaseLanguageModel,
    *,
    force: bool = False,
    query: str = "",
    **kwargs: Any,
) -> Iterator[Document]:
    """Summarize all documents, and update the GDrive metadata `description`.

    Needs `write` access: set scopes=["https://www.googleapis.com/auth/drive"].

    Note: updates the description of a shortcut without touching the target
    file's description.

    Args:
        llm: Language model to use.
        force: True to update all files; otherwise update only files whose
            description is empty.
        query: If possible, the query request.
        kwargs: Other parameters for the template (verbose, prompt, etc.).
    """
    try:
        from googleapiclient.errors import HttpError
    except ImportError as e:
        raise ImportError("Could not import the google-api-python-client package") from e

    if "https://www.googleapis.com/auth/drive" not in loader._creds.scopes:
        raise ValueError(
            f"Remove the file 'token.json' and "
            f"initialize the {loader.__class__.__name__} with "
            f"scopes=['https://www.googleapis.com/auth/drive']"
        )

    chain = load_summarize_chain(llm, chain_type="stuff", **kwargs)
    updated_files = set()  # Never update the same document twice (if it's split)
    for document in loader.lazy_get_relevant_documents(query, **kwargs):
        try:
            file_id = document.metadata["gdriveId"]
            if file_id not in updated_files:
                file = loader.files.get(
                    fileId=file_id,
                    fields=loader.fields,
                    supportsAllDrives=True,
                ).execute()
                if force or not file.get("description", "").strip():
                    summary = chain.run([document]).strip()
                    if summary:
                        loader.files.update(
                            fileId=file_id,
                            supportsAllDrives=True,
                            body={"description": summary},
                        ).execute()
                        logger.info(
                            f"For the file '{file['name']}', add description "
                            f"'{summary[:40]}...'"
                        )
                        metadata = loader._extract_meta_data(file)
                        if "summary" in metadata:
                            del metadata["summary"]
                        yield Document(page_content=summary, metadata=metadata)
                updated_files.add(file_id)
        except HttpError:
            logger.warning(
                f"Impossible to update the description of file "
                f"'{document.metadata['name']}'"
            )


def update_description_with_summary(
    loader: GoogleDriveLoader,
    llm: BaseLanguageModel,
    *,
    force: bool = False,
    query: str = "",
    **kwargs: Any,
) -> List[Document]:
    """Summarize all documents, and update the GDrive metadata `description`.

    Needs `write` access: set scopes=["https://www.googleapis.com/auth/drive"].

    Note: updates the description of a shortcut without touching the target
    file's description.

    Args:
        llm: Language model to use.
        force: True to update all files; otherwise update only files whose
            description is empty.
        query: If possible, the query request.
        kwargs: Other parameters for the template (verbose, prompt, etc.).
    """
    return list(
        lazy_update_description_with_summary(
            loader, llm, force=force, query=query, **kwargs
        )
    )
@@ -1,4 +1,353 @@
-"""DEPRECATED: Kept for backwards compatibility."""
-from langchain.document_loaders.google_drive import GoogleDriveLoader
-
-__all__ = ["GoogleDriveLoader"]
"""Loads data from Google Drive."""

# Prerequisites:
# 1. Create a Google Cloud project
# 2. Enable the Google Drive API:
#    https://console.cloud.google.com/flows/enableapi?apiid=drive.googleapis.com
# 3. Authorize credentials for desktop app:
#    https://developers.google.com/drive/api/quickstart/python#authorize_credentials_for_a_desktop_application # noqa: E501
# 4. For service accounts visit
#    https://cloud.google.com/iam/docs/service-accounts-create

import os
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Union

from pydantic import BaseModel, root_validator, validator

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

SCOPES = ["https://www.googleapis.com/auth/drive.readonly"]


class GoogleDriveLoader(BaseLoader, BaseModel):
    """Loads Google Docs from Google Drive."""

    service_account_key: Path = Path.home() / ".credentials" / "keys.json"
    """Path to the service account key file."""
    credentials_path: Path = Path.home() / ".credentials" / "credentials.json"
    """Path to the credentials file."""
    token_path: Path = Path.home() / ".credentials" / "token.json"
    """Path to the token file."""
    folder_id: Optional[str] = None
    """The folder id to load from."""
    document_ids: Optional[List[str]] = None
    """The document ids to load from."""
    file_ids: Optional[List[str]] = None
    """The file ids to load from."""
    recursive: bool = False
    """Whether to load recursively. Only applies when folder_id is given."""
    file_types: Optional[Sequence[str]] = None
    """The file types to load. Only applies when folder_id is given."""
    load_trashed_files: bool = False
    """Whether to load trashed files. Only applies when folder_id is given."""
    # NOTE(MthwRobinson) - changing the file_loader_cls to type here currently
    # results in pydantic validation errors
    file_loader_cls: Any = None
    """The file loader class to use."""
    file_loader_kwargs: Dict[str, Any] = {}
    """The file loader kwargs to use."""

    @root_validator
    def validate_inputs(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Validate that either folder_id or document_ids is set, but not both."""
        if values.get("folder_id") and (
            values.get("document_ids") or values.get("file_ids")
        ):
            raise ValueError(
                "Cannot specify both folder_id and document_ids nor "
                "folder_id and file_ids"
            )
        if (
            not values.get("folder_id")
            and not values.get("document_ids")
            and not values.get("file_ids")
        ):
            raise ValueError("Must specify either folder_id, document_ids, or file_ids")

        file_types = values.get("file_types")
        if file_types:
            if values.get("document_ids") or values.get("file_ids"):
                raise ValueError(
                    "file_types can only be given when folder_id is given,"
                    " (not when document_ids or file_ids are given)."
                )
            type_mapping = {
                "document": "application/vnd.google-apps.document",
                "sheet": "application/vnd.google-apps.spreadsheet",
                "pdf": "application/pdf",
            }
            allowed_types = list(type_mapping.keys()) + list(type_mapping.values())
            short_names = ", ".join([f"'{x}'" for x in type_mapping.keys()])
            full_names = ", ".join([f"'{x}'" for x in type_mapping.values()])
            for file_type in file_types:
                if file_type not in allowed_types:
                    raise ValueError(
                        f"Given file type {file_type} is not supported. "
                        f"Supported values are: {short_names}; and "
                        f"their full-form names: {full_names}"
                    )

            # replace short-form file types by full-form file types
            def full_form(x: str) -> str:
                return type_mapping[x] if x in type_mapping else x

            values["file_types"] = [full_form(file_type) for file_type in file_types]
        return values

    @validator("credentials_path")
    def validate_credentials_path(cls, v: Any, **kwargs: Any) -> Any:
        """Validate that credentials_path exists."""
        if not v.exists():
            raise ValueError(f"credentials_path {v} does not exist")
        return v

    def _load_credentials(self) -> Any:
        """Load credentials."""
        # Adapted from https://developers.google.com/drive/api/v3/quickstart/python
        try:
            from google.auth import default
            from google.auth.transport.requests import Request
            from google.oauth2 import service_account
            from google.oauth2.credentials import Credentials
            from google_auth_oauthlib.flow import InstalledAppFlow
        except ImportError:
            raise ImportError(
                "You must run "
                "`pip install --upgrade "
                "google-api-python-client google-auth-httplib2 "
                "google-auth-oauthlib` "
                "to use the Google Drive loader."
            )

        creds = None
        if self.service_account_key.exists():
            return service_account.Credentials.from_service_account_file(
                str(self.service_account_key), scopes=SCOPES
            )

        if self.token_path.exists():
            creds = Credentials.from_authorized_user_file(str(self.token_path), SCOPES)

        if not creds or not creds.valid:
            if creds and creds.expired and creds.refresh_token:
                creds.refresh(Request())
            elif "GOOGLE_APPLICATION_CREDENTIALS" not in os.environ:
                creds, project = default()
                creds = creds.with_scopes(SCOPES)
                # no need to write to file
                if creds:
                    return creds
            else:
                flow = InstalledAppFlow.from_client_secrets_file(
                    str(self.credentials_path), SCOPES
                )
                creds = flow.run_local_server(port=0)
            with open(self.token_path, "w") as token:
                token.write(creds.to_json())

        return creds

    def _load_sheet_from_id(self, id: str) -> List[Document]:
        """Load a sheet and all tabs from an ID."""

        from googleapiclient.discovery import build

        creds = self._load_credentials()
        sheets_service = build("sheets", "v4", credentials=creds)
        spreadsheet = sheets_service.spreadsheets().get(spreadsheetId=id).execute()
        sheets = spreadsheet.get("sheets", [])

        documents = []
        for sheet in sheets:
            sheet_name = sheet["properties"]["title"]
            result = (
                sheets_service.spreadsheets()
                .values()
                .get(spreadsheetId=id, range=sheet_name)
                .execute()
            )
            values = result.get("values", [])

            header = values[0]
            for i, row in enumerate(values[1:], start=1):
                metadata = {
                    "source": (
                        f"https://docs.google.com/spreadsheets/d/{id}/"
                        f"edit?gid={sheet['properties']['sheetId']}"
                    ),
                    "title": f"{spreadsheet['properties']['title']} - {sheet_name}",
                    "row": i,
                }
                content = []
                for j, v in enumerate(row):
                    title = header[j].strip() if len(header) > j else ""
                    content.append(f"{title}: {v.strip()}")

                page_content = "\n".join(content)
                documents.append(Document(page_content=page_content, metadata=metadata))

        return documents

    def _load_document_from_id(self, id: str) -> Document:
        """Load a document from an ID."""
        from io import BytesIO

        from googleapiclient.discovery import build
        from googleapiclient.errors import HttpError
        from googleapiclient.http import MediaIoBaseDownload

        creds = self._load_credentials()
        service = build("drive", "v3", credentials=creds)

        file = service.files().get(fileId=id, supportsAllDrives=True).execute()
        request = service.files().export_media(fileId=id, mimeType="text/plain")
        fh = BytesIO()
        downloader = MediaIoBaseDownload(fh, request)
        done = False
        try:
            while done is False:
                status, done = downloader.next_chunk()

        except HttpError as e:
            if e.resp.status == 404:
                print("File not found: {}".format(id))
            else:
                print("An error occurred: {}".format(e))

        text = fh.getvalue().decode("utf-8")
        metadata = {
            "source": f"https://docs.google.com/document/d/{id}/edit",
            "title": f"{file.get('name')}",
        }
        return Document(page_content=text, metadata=metadata)

    def _load_documents_from_folder(
        self, folder_id: str, *, file_types: Optional[Sequence[str]] = None
    ) -> List[Document]:
        """Load documents from a folder."""
        from googleapiclient.discovery import build

        creds = self._load_credentials()
        service = build("drive", "v3", credentials=creds)
        files = self._fetch_files_recursive(service, folder_id)
        # If file types filter is provided, we'll filter by the file type.
        if file_types:
            _files = [f for f in files if f["mimeType"] in file_types]  # type: ignore
        else:
            _files = files

        returns = []
        for file in _files:
            if file["trashed"] and not self.load_trashed_files:
                continue
            elif file["mimeType"] == "application/vnd.google-apps.document":
                returns.append(self._load_document_from_id(file["id"]))  # type: ignore
            elif file["mimeType"] == "application/vnd.google-apps.spreadsheet":
                returns.extend(self._load_sheet_from_id(file["id"]))  # type: ignore
            elif (
                file["mimeType"] == "application/pdf"
                or self.file_loader_cls is not None
            ):
                returns.extend(self._load_file_from_id(file["id"]))  # type: ignore
            else:
                pass
        return returns

    def _fetch_files_recursive(
        self, service: Any, folder_id: str
    ) -> List[Dict[str, Union[str, List[str]]]]:
        """Fetch all files and subfolders recursively."""
        results = (
            service.files()
            .list(
                q=f"'{folder_id}' in parents",
                pageSize=1000,
                includeItemsFromAllDrives=True,
                supportsAllDrives=True,
                fields="nextPageToken, files(id, name, mimeType, parents, trashed)",
            )
            .execute()
        )
        files = results.get("files", [])
        returns = []
        for file in files:
            if file["mimeType"] == "application/vnd.google-apps.folder":
                if self.recursive:
                    returns.extend(self._fetch_files_recursive(service, file["id"]))
            else:
                returns.append(file)

        return returns

    def _load_documents_from_ids(self) -> List[Document]:
        """Load documents from a list of IDs."""
        if not self.document_ids:
            raise ValueError("document_ids must be set")

        return [self._load_document_from_id(doc_id) for doc_id in self.document_ids]

    def _load_file_from_id(self, id: str) -> List[Document]:
        """Load a file from an ID."""
        from io import BytesIO

        from googleapiclient.discovery import build
        from googleapiclient.http import MediaIoBaseDownload

        creds = self._load_credentials()
        service = build("drive", "v3", credentials=creds)

        file = service.files().get(fileId=id, supportsAllDrives=True).execute()
        request = service.files().get_media(fileId=id)
        fh = BytesIO()
        downloader = MediaIoBaseDownload(fh, request)
        done = False
        while done is False:
            status, done = downloader.next_chunk()

        if self.file_loader_cls is not None:
            fh.seek(0)
            loader = self.file_loader_cls(file=fh, **self.file_loader_kwargs)
            docs = loader.load()
            for doc in docs:
                doc.metadata["source"] = f"https://drive.google.com/file/d/{id}/view"
            return docs

        else:
            from PyPDF2 import PdfReader

            content = fh.getvalue()
            pdf_reader = PdfReader(BytesIO(content))

            return [
                Document(
                    page_content=page.extract_text(),
                    metadata={
                        "source": f"https://drive.google.com/file/d/{id}/view",
                        "title": f"{file.get('name')}",
                        "page": i,
                    },
                )
                for i, page in enumerate(pdf_reader.pages)
            ]

    def _load_file_from_ids(self) -> List[Document]:
        """Load files from a list of IDs."""
        if not self.file_ids:
            raise ValueError("file_ids must be set")
        docs = []
        for file_id in self.file_ids:
            docs.extend(self._load_file_from_id(file_id))
        return docs

    def load(self) -> List[Document]:
        """Load documents."""
        if self.folder_id:
            return self._load_documents_from_folder(
                self.folder_id, file_types=self.file_types
            )
        elif self.document_ids:
            return self._load_documents_from_ids()
        else:
            return self._load_file_from_ids()
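
For orientation, a hedged usage sketch of the restored loader (the folder id is a placeholder; OAuth credentials are expected at the default ~/.credentials paths):

    from langchain.document_loaders import GoogleDriveLoader

    loader = GoogleDriveLoader(
        folder_id="<folder-id>",           # placeholder Drive folder id
        file_types=["document", "sheet"],  # short forms are expanded to MIME types
        recursive=False,
    )
    docs = loader.load()
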
libs/langchain/langchain/document_loaders/nuclia.py (new file)
@@ -0,0 +1,33 @@
"""Extract text from any file type."""
import json
import uuid
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.tools.nuclia.tool import NucliaUnderstandingAPI


class NucliaLoader(BaseLoader):
    """Extract text from any file type."""

    def __init__(self, path: str, nuclia_tool: NucliaUnderstandingAPI):
        self.nua = nuclia_tool
        self.id = str(uuid.uuid4())
        self.nua.run({"action": "push", "id": self.id, "path": path, "text": None})

    def load(self) -> List[Document]:
        """Load documents."""
        data = self.nua.run(
            {"action": "pull", "id": self.id, "path": None, "text": None}
        )
        if not data:
            return []
        obj = json.loads(data)
        text = obj["extracted_text"][0]["body"]["text"]
        metadata = {
            "file": obj["file_extracted_data"][0],
            "metadata": obj["field_metadata"][0],
        }
        return [Document(page_content=text, metadata=metadata)]
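
A hedged sketch of driving the new loader (assumes a Nuclia Understanding API key is configured for the tool; the pull action returns nothing until server-side processing finishes, so callers typically poll):

    import time

    from langchain.document_loaders.nuclia import NucliaLoader
    from langchain.tools.nuclia.tool import NucliaUnderstandingAPI

    nua = NucliaUnderstandingAPI(enable_ml=False)  # assumed constructor flag
    loader = NucliaLoader("./report.pdf", nua)     # placeholder local file
    docs = loader.load()
    while not docs:  # processing is asynchronous on the Nuclia side
        time.sleep(15)
        docs = loader.load()
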
@@ -43,7 +43,7 @@ class OBSDirectoryLoader(BaseLoader):
         try:
             from obs import ObsClient
         except ImportError:
-            raise ValueError(
+            raise ImportError(
                 "Could not import esdk-obs-python python package. "
                 "Please install it with `pip install esdk-obs-python`."
             )

@@ -67,7 +67,7 @@ class OBSFileLoader(BaseLoader):
         try:
             from obs import ObsClient
         except ImportError:
-            raise ValueError(
+            raise ImportError(
                 "Could not import esdk-obs-python python package. "
                 "Please install it with `pip install esdk-obs-python`."
             )

@@ -149,14 +149,14 @@ class OpenAIWhisperParserLocal(BaseBlobParser):
         try:
             from pydub import AudioSegment
         except ImportError:
-            raise ValueError(
-                "pydub package not found, please install it with " "`pip install pydub`"
+            raise ImportError(
+                "pydub package not found, please install it with `pip install pydub`"
             )

         try:
             import librosa
         except ImportError:
-            raise ValueError(
+            raise ImportError(
                 "librosa package not found, please install it with "
                 "`pip install librosa`"
             )
@@ -1,5 +1,6 @@
 """Module contains common parsers for PDFs."""
-from typing import Any, Iterator, Mapping, Optional, Union
+from typing import Any, Iterator, Mapping, Optional, Sequence, Union
+from urllib.parse import urlparse

 from langchain.document_loaders.base import BaseBlobParser
 from langchain.document_loaders.blob_loaders import Blob
@@ -149,3 +150,97 @@ class PDFPlumberParser(BaseBlobParser):
            )
            for page in doc.pages
        ]


class AmazonTextractPDFParser(BaseBlobParser):
    """Sends PDF files to Amazon Textract and parses them to generate Documents.

    For parsing multi-page PDFs, they have to reside on S3.
    """

    def __init__(
        self,
        textract_features: Optional[Sequence[int]] = None,
        client: Optional[Any] = None,
    ) -> None:
        """Initializes the parser.

        Args:
            textract_features: Features to be used for extraction, each feature
                should be passed as an int that conforms to the enum
                `Textract_Features`, see `amazon-textract-caller` pkg
            client: boto3 textract client
        """

        try:
            import textractcaller as tc

            self.tc = tc
            if textract_features is not None:
                self.textract_features = [
                    tc.Textract_Features(f) for f in textract_features
                ]
            else:
                self.textract_features = []
        except ImportError:
            raise ImportError(
                "Could not import amazon-textract-caller python package. "
                "Please install it with `pip install amazon-textract-caller`."
            )

        if not client:
            try:
                import boto3

                self.boto3_textract_client = boto3.client("textract")
            except ImportError:
                raise ImportError(
                    "Could not import boto3 python package. "
                    "Please install it with `pip install boto3`."
                )
        else:
            self.boto3_textract_client = client

    def lazy_parse(self, blob: Blob) -> Iterator[Document]:
        """Iterates over the Blob pages and returns an Iterator with a Document
        for each page, like the other parsers. For multi-page documents, blob.path
        has to be set to the S3 URI; for single-page docs, blob.data is taken.
        """

        url_parse_result = urlparse(str(blob.path)) if blob.path else None
        # Either call with S3 path (multi-page) or with bytes (single-page)
        if (
            url_parse_result
            and url_parse_result.scheme == "s3"
            and url_parse_result.netloc
        ):
            textract_response_json = self.tc.call_textract(
                input_document=str(blob.path),
                features=self.textract_features,
                boto3_textract_client=self.boto3_textract_client,
            )
        else:
            textract_response_json = self.tc.call_textract(
                input_document=blob.as_bytes(),
                features=self.textract_features,
                call_mode=self.tc.Textract_Call_Mode.FORCE_SYNC,
                boto3_textract_client=self.boto3_textract_client,
            )

        current_text = ""
        current_page = 1
        for block in textract_response_json["Blocks"]:
            if "Page" in block and not (int(block["Page"]) == current_page):
                yield Document(
                    page_content=current_text,
                    metadata={"source": blob.source, "page": current_page},
                )
                current_text = ""
                current_page = int(block["Page"])
            if "Text" in block:
                current_text += block["Text"] + " "

        yield Document(
            page_content=current_text,
            metadata={"source": blob.source, "page": current_page},
        )
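
A hedged sketch of calling the parser directly on a single-page blob (assumes AWS credentials are configured and a default boto3 Textract client; the file name is a placeholder):

    from langchain.document_loaders.blob_loaders import Blob
    from langchain.document_loaders.parsers.pdf import AmazonTextractPDFParser

    parser = AmazonTextractPDFParser()        # default boto3 textract client
    blob = Blob.from_path("one-page.pdf")     # placeholder single-page file
    for doc in parser.lazy_parse(blob):
        print(doc.metadata["page"], len(doc.page_content))
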
@@ -7,7 +7,7 @@ import time
 from abc import ABC
 from io import StringIO
 from pathlib import Path
-from typing import Any, Iterator, List, Mapping, Optional, Union
+from typing import Any, Iterator, List, Mapping, Optional, Sequence, Union
 from urllib.parse import urlparse

 import requests
@@ -16,6 +16,7 @@ from langchain.docstore.document import Document
 from langchain.document_loaders.base import BaseLoader
 from langchain.document_loaders.blob_loaders import Blob
 from langchain.document_loaders.parsers.pdf import (
+    AmazonTextractPDFParser,
     PDFMinerParser,
     PDFPlumberParser,
     PyMuPDFParser,
@@ -71,22 +72,26 @@ class BasePDFLoader(BaseLoader, ABC):
         if "~" in self.file_path:
             self.file_path = os.path.expanduser(self.file_path)

-        # If the file is a web path, download it to a temporary file, and use that
+        # If the file is a web path or S3, download it to a temporary file, and use that
         if not os.path.isfile(self.file_path) and self._is_valid_url(self.file_path):
-            r = requests.get(self.file_path)
-
-            if r.status_code != 200:
-                raise ValueError(
-                    "Check the url of your file; returned status code %s"
-                    % r.status_code
-                )
-
-            self.web_path = self.file_path
             self.temp_dir = tempfile.TemporaryDirectory()
-            temp_pdf = Path(self.temp_dir.name) / "tmp.pdf"
-            with open(temp_pdf, mode="wb") as f:
-                f.write(r.content)
-            self.file_path = str(temp_pdf)
+            _, suffix = os.path.splitext(self.file_path)
+            temp_pdf = os.path.join(self.temp_dir.name, f"tmp{suffix}")
+            if self._is_s3_url(self.file_path):
+                self.web_path = self.file_path
+            else:
+                r = requests.get(self.file_path)
+
+                if r.status_code != 200:
+                    raise ValueError(
+                        "Check the url of your file; returned status code %s"
+                        % r.status_code
+                    )
+
+                self.web_path = self.file_path
+                with open(temp_pdf, mode="wb") as f:
+                    f.write(r.content)
+                self.file_path = str(temp_pdf)
         elif not os.path.isfile(self.file_path):
             raise ValueError("File path %s is not a valid file or url" % self.file_path)
@@ -100,6 +105,17 @@ class BasePDFLoader(BaseLoader, ABC):
         parsed = urlparse(url)
         return bool(parsed.netloc) and bool(parsed.scheme)

+    @staticmethod
+    def _is_s3_url(url: str) -> bool:
+        """Check if the url is an S3 URI."""
+        try:
+            result = urlparse(url)
+            if result.scheme == "s3" and result.netloc:
+                return True
+            return False
+        except ValueError:
+            return False
+
     @property
     def source(self) -> str:
         return self.web_path if self.web_path is not None else self.file_path
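
The helper is a plain urlparse check; for illustration:

    from urllib.parse import urlparse

    result = urlparse("s3://pdfs/myfile.pdf")
    assert result.scheme == "s3" and result.netloc == "pdfs"  # so _is_s3_url is True
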
@@ -440,3 +456,144 @@ class PDFPlumberLoader(BasePDFLoader):
        parser = PDFPlumberParser(text_kwargs=self.text_kwargs)
        blob = Blob.from_path(self.file_path)
        return parser.parse(blob)


class AmazonTextractPDFLoader(BasePDFLoader):
    """Loads a PDF document from local file system, HTTP or S3.

    To authenticate, the AWS client uses the following methods to
    automatically load credentials:
    https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html

    If a specific credential profile should be used, you must pass
    the name of the profile from the ~/.aws/credentials file that is to be used.

    Make sure the credentials / roles used have the required policies to
    access the Amazon Textract service.

    Example:
        .. code-block:: python

            from langchain.document_loaders import AmazonTextractPDFLoader

            loader = AmazonTextractPDFLoader(
                file_path="s3://pdfs/myfile.pdf"
            )
            document = loader.load()
    """

    def __init__(
        self,
        file_path: str,
        textract_features: Optional[Sequence[str]] = None,
        client: Optional[Any] = None,
        credentials_profile_name: Optional[str] = None,
        region_name: Optional[str] = None,
        endpoint_url: Optional[str] = None,
    ) -> None:
        """Initialize the loader.

        Args:
            file_path: A file, url or s3 path for input file
            textract_features: Features to be used for extraction, each feature
                should be passed as a str that conforms to the enum
                `Textract_Features`, see `amazon-textract-caller` pkg
            client: boto3 textract client (Optional)
            credentials_profile_name: AWS profile name, if not default (Optional)
            region_name: AWS region, eg us-east-1 (Optional)
            endpoint_url: endpoint url for the textract service (Optional)
        """
        super().__init__(file_path)

        try:
            import textractcaller as tc  # noqa: F401
        except ImportError:
            raise ModuleNotFoundError(
                "Could not import amazon-textract-caller python package. "
                "Please install it with `pip install amazon-textract-caller`."
            )
        if textract_features:
            features = [tc.Textract_Features[x] for x in textract_features]
        else:
            features = []

        if credentials_profile_name or region_name or endpoint_url:
            try:
                import boto3

                if credentials_profile_name is not None:
                    session = boto3.Session(profile_name=credentials_profile_name)
                else:
                    # use default credentials
                    session = boto3.Session()

                client_params = {}
                if region_name:
                    client_params["region_name"] = region_name
                if endpoint_url:
                    client_params["endpoint_url"] = endpoint_url

                client = session.client("textract", **client_params)

            except ImportError:
                raise ModuleNotFoundError(
                    "Could not import boto3 python package. "
                    "Please install it with `pip install boto3`."
                )
            except Exception as e:
                raise ValueError(
                    "Could not load credentials to authenticate with AWS client. "
                    "Please check that credentials in the specified "
                    "profile name are valid."
                ) from e
        self.parser = AmazonTextractPDFParser(textract_features=features, client=client)

    def load(self) -> List[Document]:
        """Load given path as pages."""
        return list(self.lazy_load())

    def lazy_load(
        self,
    ) -> Iterator[Document]:
        """Lazy load documents."""
        # self.file_path is local, but the blob has to carry the S3 location
        # if the file originated from S3, since Textract needs multi-page
        # documents on S3; raises ValueError when multi-page and not on S3.

        if self.web_path and self._is_s3_url(self.web_path):
            blob = Blob(path=self.web_path)
        else:
            blob = Blob.from_path(self.file_path)
            if AmazonTextractPDFLoader._get_number_of_pages(blob) > 1:
                raise ValueError(
                    f"the file {blob.path} is a multi-page document, "
                    f"but not stored on S3. "
                    f"Textract requires multi-page documents to be on S3."
                )

        yield from self.parser.parse(blob)

    @staticmethod
    def _get_number_of_pages(blob: Blob) -> int:
        try:
            import pypdf
            from PIL import Image, ImageSequence

        except ImportError:
            raise ModuleNotFoundError(
                "Could not import pypdf or Pillow python packages. "
                "Please install them with `pip install pypdf Pillow`."
            )
        if blob.mimetype == "application/pdf":
            with blob.as_bytes_io() as input_pdf_file:
                pdf_reader = pypdf.PdfReader(input_pdf_file)
                return len(pdf_reader.pages)
        elif blob.mimetype == "image/tiff":
            num_pages = 0
            # Pillow needs a file-like object; count the TIFF frames
            with blob.as_bytes_io() as input_file:
                img = Image.open(input_file)
                for _, _ in enumerate(ImageSequence.Iterator(img)):
                    num_pages += 1
            return num_pages
        elif blob.mimetype in ["image/png", "image/jpeg"]:
            return 1
        else:
            raise ValueError(f"unsupported mime type: {blob.mimetype}")
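
Beyond the docstring example, a hedged sketch of the optional analysis features and client parameters added here (the S3 URI and region are placeholders; multi-page inputs must live on S3):

    from langchain.document_loaders import AmazonTextractPDFLoader

    loader = AmazonTextractPDFLoader(
        "s3://pdfs/multi-page.pdf",             # placeholder S3 URI
        textract_features=["TABLES", "FORMS"],  # names from the Textract_Features enum
        region_name="us-east-1",
    )
    docs = loader.load()  # one Document per page
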
@@ -1,4 +1,6 @@
-from typing import Iterator, List, Optional, Set
+import asyncio
+import re
+from typing import Callable, Iterator, List, Optional, Set, Union
 from urllib.parse import urljoin, urlparse

 import requests
@@ -13,20 +15,117 @@ class RecursiveUrlLoader(BaseLoader):
     def __init__(
         self,
         url: str,
+        max_depth: Optional[int] = None,
+        use_async: Optional[bool] = None,
+        extractor: Optional[Callable[[str], str]] = None,
         exclude_dirs: Optional[str] = None,
+        timeout: Optional[int] = None,
+        prevent_outside: Optional[bool] = None,
     ) -> None:
         """Initialize with URL to crawl and any subdirectories to exclude.

         Args:
             url: The URL to crawl.
             exclude_dirs: A list of subdirectories to exclude.
+            use_async: Whether to use asynchronous loading;
+                if use_async is true, this function will not be lazy,
+                but it will still work in the expected way, just not lazy.
+            extractor: A function to extract the text from the html;
+                when the extractor returns an empty string, the document is ignored.
+            max_depth: The max depth of the recursive loading.
+            timeout: The timeout for the requests, in seconds.
         """

         self.url = url
         self.exclude_dirs = exclude_dirs
+        self.use_async = use_async if use_async is not None else False
+        self.extractor = extractor if extractor is not None else lambda x: x
+        self.max_depth = max_depth if max_depth is not None else 2
+        self.timeout = timeout if timeout is not None else 10
+        self.prevent_outside = prevent_outside if prevent_outside is not None else True

-    def get_child_links_recursive(
-        self, url: str, visited: Optional[Set[str]] = None
+    def _get_sub_links(self, raw_html: str, base_url: str) -> List[str]:
+        """Extract all the links from the raw html and convert them into
+        absolute paths.
+
+        Args:
+            raw_html (str): original html
+            base_url (str): the base url of the html
+
+        Returns:
+            List[str]: sub links
+        """
+        # Get all links that are relative to the root of the website
+        all_links = re.findall(r"href=[\"\'](.*?)[\"\']", raw_html)
+        absolute_paths = []
+        invalid_prefixes = ("javascript:", "mailto:", "#")
+        invalid_suffixes = (
+            ".css",
+            ".js",
+            ".ico",
+            ".png",
+            ".jpg",
+            ".jpeg",
+            ".gif",
+            ".svg",
+        )
+        # Process the links
+        for link in all_links:
+            # Ignore blacklisted patterns
+            # like javascript: or mailto:, files of svg, ico, css, js
+            if link.startswith(invalid_prefixes) or link.endswith(invalid_suffixes):
+                continue
+            # Some may be absolute links like https://to/path
+            if link.startswith("http"):
+                if (not self.prevent_outside) or (
+                    self.prevent_outside and link.startswith(base_url)
+                ):
+                    absolute_paths.append(link)
+                else:
+                    absolute_paths.append(urljoin(base_url, link))
+            # Some may be relative links like /to/path
+            if link.startswith("/") and not link.startswith("//"):
+                absolute_paths.append(urljoin(base_url, link))
+                continue
+            # Some may have omitted the protocol like //to/path
+            if link.startswith("//"):
+                absolute_paths.append(f"{urlparse(base_url).scheme}:{link}")
+                continue
+        # Remove duplicates
+        # also do another filter to prevent outside links
+        absolute_paths = list(
+            set(
+                [
+                    path
+                    for path in absolute_paths
+                    if not self.prevent_outside
+                    or path.startswith(base_url)
+                    and path != base_url
+                ]
+            )
+        )
+
+        return absolute_paths
+
+    def _gen_metadata(self, raw_html: str, url: str) -> dict:
+        """Build metadata from BeautifulSoup output."""
+        try:
+            from bs4 import BeautifulSoup
+        except ImportError:
+            print("The bs4 package is required for the RecursiveUrlLoader.")
+            print("Please install it with `pip install bs4`.")
+        metadata = {"source": url}
+        soup = BeautifulSoup(raw_html, "html.parser")
+        if title := soup.find("title"):
+            metadata["title"] = title.get_text()
+        if description := soup.find("meta", attrs={"name": "description"}):
+            metadata["description"] = description.get("content", None)
+        if html := soup.find("html"):
+            metadata["language"] = html.get("lang", None)
+        return metadata
+
+    def _get_child_links_recursive(
+        self, url: str, visited: Optional[Set[str]] = None, depth: int = 0
+    ) -> Iterator[Document]:
         """Recursively get all child links starting with the path of the input URL.

@@ -35,26 +134,12 @@ class RecursiveUrlLoader(BaseLoader):
             visited: A set of visited URLs.
         """

-        from langchain.document_loaders import WebBaseLoader
-
-        try:
-            from bs4 import BeautifulSoup
-        except ImportError:
-            raise ImportError(
-                "The BeautifulSoup package is required for the RecursiveUrlLoader."
-            )
-
-        # Construct the base and parent URLs
-        parsed_url = urlparse(url)
-        base_url = f"{parsed_url.scheme}://{parsed_url.netloc}"
-        parent_url = "/".join(parsed_url.path.split("/")[:-1])
-        current_path = parsed_url.path
+        if depth > self.max_depth:
+            return []

         # Add a trailing slash if not present
-        if not base_url.endswith("/"):
-            base_url += "/"
-        if not parent_url.endswith("/"):
-            parent_url += "/"
         if not url.endswith("/"):
             url += "/"

         # Exclude the root and parent from a list
         visited = set() if visited is None else visited

@@ -63,44 +148,162 @@ class RecursiveUrlLoader(BaseLoader):
         if self.exclude_dirs and any(
             url.startswith(exclude_dir) for exclude_dir in self.exclude_dirs
         ):
-            return visited
+            return []

-        # Get all links that are relative to the root of the website
-        response = requests.get(url)
-        soup = BeautifulSoup(response.text, "html.parser")
-        all_links = [link.get("href") for link in soup.find_all("a")]
+        # Get all links that can be accessed from the current URL
+        try:
+            response = requests.get(url, timeout=self.timeout)
+        except Exception:
+            return []

-        # Extract only the links that are children of the current URL
-        child_links = list(
-            {
-                link
-                for link in all_links
-                if link and link.startswith(current_path) and link != current_path
-            }
-        )
-
-        # Get absolute path for all root relative links listed
-        absolute_paths = [urljoin(base_url, link) for link in child_links]
+        absolute_paths = self._get_sub_links(response.text, url)

         # Store the visited links and recursively visit the children
         for link in absolute_paths:
             # Check all unvisited links
             if link not in visited:
                 visited.add(link)
-                loaded_link = WebBaseLoader(link).load()
-                if isinstance(loaded_link, list):
-                    yield from loaded_link
-                else:
-                    yield loaded_link
+                try:
+                    response = requests.get(link)
+                    text = response.text
+                except Exception:
+                    # unreachable link, so just ignore it
+                    continue
+                loaded_link = Document(
+                    page_content=self.extractor(text),
+                    metadata=self._gen_metadata(text, link),
+                )
+                yield loaded_link
                 # If the link is a directory (w/ children) then visit it
                 if link.endswith("/"):
-                    yield from self.get_child_links_recursive(link, visited)
-
-        return visited
+                    yield from self._get_child_links_recursive(link, visited, depth + 1)
+        return []

+    async def _async_get_child_links_recursive(
+        self, url: str, visited: Optional[Set[str]] = None, depth: int = 0
+    ) -> List[Document]:
+        """Recursively get all child links starting with the path of the input URL.
+
+        Args:
+            url: The URL to crawl.
+            visited: A set of visited URLs.
+            depth: To reach the current url, how many pages have been visited.
+        """
+        try:
+            import aiohttp
+        except ImportError:
+            print("The aiohttp package is required for the RecursiveUrlLoader.")
+            print("Please install it with `pip install aiohttp`.")
+        if depth > self.max_depth:
+            return []
+
+        # Add a trailing slash if not present
+        if not url.endswith("/"):
+            url += "/"
+
+        # Exclude the root and parent from a list
+        visited = set() if visited is None else visited
+
+        # Exclude the links that start with any of the excluded directories
+        if self.exclude_dirs and any(
+            url.startswith(exclude_dir) for exclude_dir in self.exclude_dirs
+        ):
+            return []
+        # Disable SSL verification because websites may have invalid SSL
+        # certificates, but this won't cause any security issues for us.
+        async with aiohttp.ClientSession(
+            connector=aiohttp.TCPConnector(ssl=False),
+            timeout=aiohttp.ClientTimeout(self.timeout),
+        ) as session:
+            # Some urls may be invalid, so catch the exception
+            response: aiohttp.ClientResponse
+            try:
+                response = await session.get(url)
+                text = await response.text()
+            except aiohttp.client_exceptions.InvalidURL:
+                return []
+            # There may be some other exceptions, so catch them;
+            # we don't want to stop the whole process
+            except Exception:
+                return []
+
+        absolute_paths = self._get_sub_links(text, url)
+
+        # The worker is only called within the current function.
+        # It processes one link; the caller then recursively processes
+        # the children.
+        async def worker(link: str) -> Union[Document, None]:
+            try:
+                async with aiohttp.ClientSession(
+                    connector=aiohttp.TCPConnector(ssl=False),
+                    timeout=aiohttp.ClientTimeout(self.timeout),
+                ) as session:
+                    response = await session.get(link)
+                    text = await response.text()
+                    extracted = self.extractor(text)
+                    if len(extracted) > 0:
+                        return Document(
+                            page_content=extracted,
+                            metadata=self._gen_metadata(text, link),
+                        )
+                    else:
+                        return None
+            # Despite the fact that we have filtered some links,
+            # there may still be some invalid links, so catch the exception
+            except aiohttp.client_exceptions.InvalidURL:
+                return None
+            # There may be some other exceptions, so catch them;
+            # we don't want to stop the whole process
+            except Exception:
+                return None
+
+        # The coroutines that will be executed
+        tasks = []
+        # Generate the tasks
+        for link in absolute_paths:
+            # Check all unvisited links
+            if link not in visited:
+                visited.add(link)
+                tasks.append(worker(link))
+        # Get the not-None results
+        results = list(
+            filter(lambda x: x is not None, await asyncio.gather(*tasks))
+        )
+        # Recursively call the function to get the children of the children
+        sub_tasks = []
+        for link in absolute_paths:
+            sub_tasks.append(
+                self._async_get_child_links_recursive(link, visited, depth + 1)
+            )
+        # sub_tasks returns coroutines of lists, so we need to flatten
+        # the result of await asyncio.gather(*sub_tasks)
+        flattened = []
+        next_results = await asyncio.gather(*sub_tasks)
+        for sub_result in next_results:
+            if isinstance(sub_result, Exception):
+                # We don't want to stop the whole process, so just ignore it.
+                # Non-standard html, an invalid url, or a 404 may cause this,
+                # but we can't do anything about it.
+                continue
+            if sub_result is not None:
+                flattened += sub_result
+        results += flattened
+        return list(filter(lambda x: x is not None, results))

     def lazy_load(self) -> Iterator[Document]:
-        """Lazy load web pages."""
-        return self.get_child_links_recursive(self.url)
+        """Lazy load web pages.
+
+        When use_async is True, this function will not be lazy,
+        but it will still work in the expected way, just not lazy."""
+        if self.use_async:
+            results = asyncio.run(self._async_get_child_links_recursive(self.url))
+            if results is None:
+                return iter([])
+            else:
+                return iter(results)
+        else:
+            return self._get_child_links_recursive(self.url)

     def load(self) -> List[Document]:
         """Load web pages."""
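
A hedged end-to-end sketch of the reworked loader (the docs site URL is a placeholder; BeautifulSoup is one possible extractor, and the extractor argument is optional):

    from bs4 import BeautifulSoup
    from langchain.document_loaders import RecursiveUrlLoader

    loader = RecursiveUrlLoader(
        url="https://docs.python.org/3.9/",  # placeholder site to crawl
        max_depth=2,
        use_async=False,  # keep the lazy, synchronous path
        extractor=lambda html: BeautifulSoup(html, "html.parser").text,
    )
    docs = loader.load()
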
libs/langchain/langchain/document_loaders/rss.py (new file)
@@ -0,0 +1,133 @@
"""Loader that uses newspaper to load news articles from RSS feeds."""
import logging
from typing import Any, Iterator, List, Optional, Sequence

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.news import NewsURLLoader

logger = logging.getLogger(__name__)


class RSSFeedLoader(BaseLoader):
    """Loader that uses newspaper to load news articles from RSS feeds.

    Args:
        urls: URLs for RSS feeds to load. Each article in the feed is loaded into its own document.
        opml: OPML file to load feed urls from. Only one of urls or opml should be provided. The value
            can be a URL string, or OPML markup contents as byte or string.
        continue_on_failure: If True, continue loading documents even if
            loading fails for a particular URL.
        show_progress_bar: If True, use tqdm to show a loading progress bar. Requires
            tqdm to be installed, ``pip install tqdm``.
        **newsloader_kwargs: Any additional named arguments to pass to
            NewsURLLoader.

    Example:
        .. code-block:: python

            from langchain.document_loaders import RSSFeedLoader

            loader = RSSFeedLoader(
                urls=["<url-1>", "<url-2>"],
            )
            docs = loader.load()

    The loader uses feedparser to parse RSS feeds. The feedparser library is not installed by default so you should
    install it if using this loader:
    https://pythonhosted.org/feedparser/

    If you use OPML, you should also install listparser:
    https://pythonhosted.org/listparser/

    Finally, newspaper is used to process each article:
    https://newspaper.readthedocs.io/en/latest/
    """  # noqa: E501

    def __init__(
        self,
        urls: Optional[Sequence[str]] = None,
        opml: Optional[str] = None,
        continue_on_failure: bool = True,
        show_progress_bar: bool = False,
        **newsloader_kwargs: Any,
    ) -> None:
        """Initialize with urls or OPML."""
        if (urls is None) == (
            opml is None
        ):  # This is True if both are None or neither is None
            raise ValueError(
                "Provide either the urls or the opml argument, but not both."
            )
        self.urls = urls
        self.opml = opml
        self.continue_on_failure = continue_on_failure
        self.show_progress_bar = show_progress_bar
        self.newsloader_kwargs = newsloader_kwargs

    def load(self) -> List[Document]:
        iter = self.lazy_load()
        if self.show_progress_bar:
            try:
                from tqdm import tqdm
            except ImportError as e:
                raise ImportError(
                    "Package tqdm must be installed if show_progress_bar=True. "
                    "Please install with 'pip install tqdm' or set "
                    "show_progress_bar=False."
                ) from e
            iter = tqdm(iter)
        return list(iter)

    @property
    def _get_urls(self) -> Sequence[str]:
        if self.urls:
            return self.urls
        try:
            import listparser
        except ImportError as e:
            raise ImportError(
                "Package listparser must be installed if the opml arg is used. "
                "Please install with 'pip install listparser' or use the "
                "urls arg instead."
            ) from e
        rss = listparser.parse(self.opml)
        return [feed.url for feed in rss.feeds]

    def lazy_load(self) -> Iterator[Document]:
        try:
            import feedparser  # noqa:F401
        except ImportError:
            raise ImportError(
                "feedparser package not found, please install it with "
                "`pip install feedparser`"
            )

        for url in self._get_urls:
            try:
                feed = feedparser.parse(url)
                if getattr(feed, "bozo", False):
                    raise ValueError(
                        f"Error fetching {url}, exception: {feed.bozo_exception}"
                    )
            except Exception as e:
                if self.continue_on_failure:
                    logger.error(f"Error fetching {url}, exception: {e}")
                    continue
                else:
                    raise e
            try:
                for entry in feed.entries:
                    loader = NewsURLLoader(
                        urls=[entry.link],
                        **self.newsloader_kwargs,
                    )
                    article = loader.load()[0]
                    article.metadata["feed"] = url
                    yield article
            except Exception as e:
                if self.continue_on_failure:
                    logger.error(f"Error processing entry {entry.link}, exception: {e}")
                    continue
                else:
                    raise e
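
For the OPML path, a hedged sketch (the feed URL is a placeholder; listparser must be installed, and OPML markup can be passed inline as a string):

    from langchain.document_loaders import RSSFeedLoader

    opml = """<?xml version="1.0" encoding="UTF-8"?>
    <opml version="2.0">
      <body>
        <outline type="rss" xmlUrl="https://example.com/feed.xml" />
      </body>
    </opml>"""  # placeholder feed URL
    loader = RSSFeedLoader(opml=opml, show_progress_bar=True)
    docs = loader.load()
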
@@ -27,6 +27,7 @@ from langchain.document_transformers.embeddings_redundant_filter import (
 )
 from langchain.document_transformers.html2text import Html2TextTransformer
 from langchain.document_transformers.long_context_reorder import LongContextReorder
+from langchain.document_transformers.nuclia_text_transform import NucliaTextTransformer
 from langchain.document_transformers.openai_functions import OpenAIMetadataTagger

 __all__ = [
@@ -37,6 +38,7 @@ __all__ = [
     "EmbeddingsRedundantFilter",
     "get_stateful_documents",
     "LongContextReorder",
+    "NucliaTextTransformer",
     "OpenAIMetadataTagger",
     "Html2TextTransformer",
 ]
@@ -0,0 +1,47 @@
import asyncio
import json
import uuid
from typing import Any, Sequence

from langchain.schema.document import BaseDocumentTransformer, Document
from langchain.tools.nuclia.tool import NucliaUnderstandingAPI


class NucliaTextTransformer(BaseDocumentTransformer):
    """
    The Nuclia Understanding API splits into paragraphs and sentences,
    identifies entities, provides a summary of the text and generates
    embeddings for all the sentences.
    """

    def __init__(self, nua: NucliaUnderstandingAPI):
        self.nua = nua

    def transform_documents(
        self, documents: Sequence[Document], **kwargs: Any
    ) -> Sequence[Document]:
        raise NotImplementedError

    async def atransform_documents(
        self, documents: Sequence[Document], **kwargs: Any
    ) -> Sequence[Document]:
        tasks = [
            self.nua.arun(
                {
                    "action": "push",
                    "id": str(uuid.uuid4()),
                    "text": doc.page_content,
                    "path": None,
                }
            )
            for doc in documents
        ]
        results = await asyncio.gather(*tasks)
        for doc, result in zip(documents, results):
            obj = json.loads(result)
            metadata = {
                "file": obj["file_extracted_data"][0],
                "metadata": obj["field_metadata"][0],
            }
            doc.metadata["nuclia"] = metadata
        return documents
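
Since only the async path is implemented, a hedged driver sketch (assumes the NucliaUnderstandingAPI is configured as for the loader above; the sample text is a placeholder):

    import asyncio

    from langchain.document_transformers import NucliaTextTransformer
    from langchain.schema.document import Document
    from langchain.tools.nuclia.tool import NucliaUnderstandingAPI

    async def main() -> None:
        nua = NucliaUnderstandingAPI(enable_ml=True)  # assumed constructor flag
        transformer = NucliaTextTransformer(nua)
        docs = [Document(page_content="He lived in New York City.")]
        enriched = await transformer.atransform_documents(docs)
        print(enriched[0].metadata["nuclia"])

    asyncio.run(main())
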
@@ -27,7 +27,7 @@ from langchain.embeddings.deepinfra import DeepInfraEmbeddings
 from langchain.embeddings.edenai import EdenAiEmbeddings
 from langchain.embeddings.elasticsearch import ElasticsearchEmbeddings
 from langchain.embeddings.embaas import EmbaasEmbeddings
-from langchain.embeddings.fake import FakeEmbeddings
+from langchain.embeddings.fake import DeterministicFakeEmbedding, FakeEmbeddings
 from langchain.embeddings.google_palm import GooglePalmEmbeddings
 from langchain.embeddings.gpt4all import GPT4AllEmbeddings
 from langchain.embeddings.huggingface import (
@@ -78,6 +78,7 @@ __all__ = [
     "SelfHostedHuggingFaceEmbeddings",
     "SelfHostedHuggingFaceInstructEmbeddings",
     "FakeEmbeddings",
+    "DeterministicFakeEmbedding",
     "AlephAlphaAsymmetricSemanticEmbedding",
     "AlephAlphaSymmetricSemanticEmbedding",
     "SentenceTransformerEmbeddings",
@@ -16,10 +16,11 @@ class AlephAlphaAsymmetricSemanticEmbedding(BaseModel, Embeddings):
|
||||
|
||||
Example:
|
||||
.. code-block:: python
|
||||
|
||||
from aleph_alpha import AlephAlphaAsymmetricSemanticEmbedding
|
||||
|
||||
embeddings = AlephAlphaSymmetricSemanticEmbedding()
|
||||
embeddings = AlephAlphaAsymmetricSemanticEmbedding(
|
||||
normalize=True, compress_to_size=128
|
||||
)
|
||||
|
||||
document = "This is a content of the document"
|
||||
query = "What is the content of the document?"
|
||||
@@ -30,24 +31,55 @@ class AlephAlphaAsymmetricSemanticEmbedding(BaseModel, Embeddings):
|
||||
"""
|
||||
|
||||
client: Any #: :meta private:
|
||||
"""Aleph Alpha client."""
|
||||
model: Optional[str] = "luminous-base"
|
||||
|
||||
# Embedding params
|
||||
model: str = "luminous-base"
|
||||
"""Model name to use."""
|
||||
hosting: Optional[str] = "https://api.aleph-alpha.com"
|
||||
"""Optional parameter that specifies which datacenters may process the request."""
|
||||
normalize: Optional[bool] = True
|
||||
"""Should returned embeddings be normalized"""
|
||||
compress_to_size: Optional[int] = 128
|
||||
compress_to_size: Optional[int] = None
|
||||
"""Should the returned embeddings come back as an original 5120-dim vector,
|
||||
or should it be compressed to 128-dim."""
|
||||
normalize: Optional[bool] = None
|
||||
"""Should returned embeddings be normalized"""
|
||||
contextual_control_threshold: Optional[int] = None
|
||||
"""Attention control parameters only apply to those tokens that have
|
||||
explicitly been set in the request."""
|
||||
control_log_additive: Optional[bool] = True
|
||||
control_log_additive: bool = True
|
||||
"""Apply controls on prompt items by adding the log(control_factor)
|
||||
to attention scores."""
|
||||
|
||||
# Client params
|
||||
aleph_alpha_api_key: Optional[str] = None
|
||||
"""API key for Aleph Alpha API."""
|
||||
host: str = "https://api.aleph-alpha.com"
|
||||
"""The hostname of the API host.
|
||||
The default one is "https://api.aleph-alpha.com")"""
|
||||
hosting: Optional[str] = None
|
||||
"""Determines in which datacenters the request may be processed.
|
||||
You can either set the parameter to "aleph-alpha" or omit it (defaulting to None).
|
||||
Not setting this value, or setting it to None, gives us maximal flexibility
|
||||
in processing your request in our
|
||||
own datacenters and on servers hosted with other providers.
|
||||
Choose this option for maximal availability.
|
||||
Setting it to "aleph-alpha" allows us to only process the request
|
||||
in our own datacenters.
|
||||
Choose this option for maximal data privacy."""
|
||||
request_timeout_seconds: int = 305
|
||||
"""Client timeout that will be set for HTTP requests in the
|
||||
`requests` library's API calls.
|
||||
Server will close all requests after 300 seconds with an internal server error."""
|
||||
total_retries: int = 8
|
||||
"""The number of retries made in case requests fail with certain retryable
|
||||
status codes. If the last
|
||||
retry fails a corresponding exception is raised. Note, that between retries
|
||||
an exponential backoff
|
||||
is applied, starting with 0.5 s after the first retry and doubling for each
|
||||
retry made. So with the
|
||||
default setting of 8 retries a total wait time of 63.5 s is added between
|
||||
the retries."""
|
||||
nice: bool = False
|
||||
"""Setting this to True, will signal to the API that you intend to be
|
||||
nice to other users
|
||||
by de-prioritizing your request below concurrent ones."""
|
||||
|
||||
@root_validator()
|
||||
def validate_environment(cls, values: Dict) -> Dict:
|
||||
@@ -57,12 +89,21 @@ class AlephAlphaAsymmetricSemanticEmbedding(BaseModel, Embeddings):
        )
        try:
            from aleph_alpha_client import Client

            values["client"] = Client(
                token=aleph_alpha_api_key,
                host=values["host"],
                hosting=values["hosting"],
                request_timeout_seconds=values["request_timeout_seconds"],
                total_retries=values["total_retries"],
                nice=values["nice"],
            )
        except ImportError:
            raise ValueError(
                "Could not import aleph_alpha_client python package. "
                "Please install it with `pip install aleph_alpha_client`."
            )
        values["client"] = Client(token=aleph_alpha_api_key)

        return values
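With this validator, the new client fields are forwarded straight into `aleph_alpha_client.Client` instead of only passing the token. A hedged usage sketch (field names taken from the diff above; the key is a placeholder):

embeddings = AlephAlphaAsymmetricSemanticEmbedding(
    aleph_alpha_api_key="AA_KEY",  # placeholder
    host="https://api.aleph-alpha.com",
    request_timeout_seconds=60,
    total_retries=3,
    nice=True,
)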
    def embed_documents(self, texts: List[str]) -> List[List[float]]:

@@ -152,7 +193,9 @@ class AlephAlphaSymmetricSemanticEmbedding(AlephAlphaAsymmetricSemanticEmbedding

            from aleph_alpha import AlephAlphaSymmetricSemanticEmbedding

            embeddings = AlephAlphaAsymmetricSemanticEmbedding()
            embeddings = AlephAlphaAsymmetricSemanticEmbedding(
                normalize=True, compress_to_size=128
            )
            text = "This is a test text"

            doc_result = embeddings.embed_documents([text])
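For the symmetric variant, queries and documents share one representation, so the same vector space can be used for plain text-to-text similarity. A sketch assuming numpy is available and using a placeholder key:

import numpy as np

from langchain.embeddings import AlephAlphaSymmetricSemanticEmbedding

embeddings = AlephAlphaSymmetricSemanticEmbedding(
    aleph_alpha_api_key="AA_KEY",  # placeholder
    normalize=True,
    compress_to_size=128,
)
v1, v2 = embeddings.embed_documents(["This is a test text", "Another test text"])
# with normalize=True the vectors are unit length, so the dot product
# is the cosine similarity
similarity = float(np.dot(v1, v2))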
@@ -1,3 +1,4 @@
import hashlib
from typing import List

import numpy as np
@@ -20,3 +21,30 @@ class FakeEmbeddings(Embeddings, BaseModel):

    def embed_query(self, text: str) -> List[float]:
        return self._get_embedding()


class DeterministicFakeEmbedding(Embeddings, BaseModel):
    """
    Fake embedding model that always returns
    the same embedding vector for the same text.
    """

    size: int
    """The size of the embedding vector."""

    def _get_embedding(self, seed: int) -> List[float]:
        # set the seed for the random generator
        np.random.seed(seed)
        return list(np.random.normal(size=self.size))

    def _get_seed(self, text: str) -> int:
        """
        Get a seed for the random generator, using the hash of the text.
        """
        return int(hashlib.sha256(text.encode("utf-8")).hexdigest(), 16) % 10**8

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        return [self._get_embedding(seed=self._get_seed(_)) for _ in texts]

    def embed_query(self, text: str) -> List[float]:
        return self._get_embedding(seed=self._get_seed(text))
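Because the seed is derived from a SHA-256 hash of the text, the class above is deterministic across runs and processes, unlike the plain FakeEmbeddings. A quick demonstration (module path assumed from the class definition above):

from langchain.embeddings.fake import DeterministicFakeEmbedding  # path assumed

fake = DeterministicFakeEmbedding(size=8)
a = fake.embed_query("hello")
b = fake.embed_query("hello")
c = fake.embed_query("world")
assert a == b   # same text -> same vector, every time
assert a != c   # different text -> (almost surely) a different vector
assert len(a) == 8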
@@ -1,4 +1,4 @@
from typing import Any, List
from typing import Any, List, Optional

from pydantic import BaseModel, Extra
@@ -15,12 +15,13 @@ class ModelScopeEmbeddings(BaseModel, Embeddings):

            from langchain.embeddings import ModelScopeEmbeddings
            model_id = "damo/nlp_corom_sentence-embedding_english-base"
            embed = ModelScopeEmbeddings(model_id=model_id)
            embed = ModelScopeEmbeddings(model_id=model_id, model_revision="v1.0.0")
    """

    embed: Any
    model_id: str = "damo/nlp_corom_sentence-embedding_english-base"
    """Model name to use."""
    model_revision: Optional[str] = None

    def __init__(self, **kwargs: Any):
        """Initialize the modelscope"""
@@ -28,14 +29,16 @@ class ModelScopeEmbeddings(BaseModel, Embeddings):
        try:
            from modelscope.pipelines import pipeline
            from modelscope.utils.constant import Tasks

            self.embed = pipeline(Tasks.sentence_embedding, model=self.model_id)

        except ImportError as e:
            raise ImportError(
                "Could not import some python packages."
                "Please install it with `pip install modelscope`."
            ) from e
        self.embed = pipeline(
            Tasks.sentence_embedding,
            model=self.model_id,
            model_revision=self.model_revision,
        )

    class Config:
        """Configuration for this pydantic object."""
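With `model_revision` now wired into the pipeline call, a revision can be pinned at construction time. A sketch, reusing the model id and revision string from the docstring example above (requires `pip install modelscope`):

embed = ModelScopeEmbeddings(
    model_id="damo/nlp_corom_sentence-embedding_english-base",
    model_revision="v1.0.0",  # pin a specific published revision
)
vectors = embed.embed_documents(["first sentence", "second sentence"])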
@@ -295,7 +295,13 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
        if self.openai_api_type in ("azure", "azure_ad", "azuread"):
            openai_args["engine"] = self.deployment
        if self.openai_proxy:
            import openai
            try:
                import openai
            except ImportError:
                raise ImportError(
                    "Could not import openai python package. "
                    "Please install it with `pip install openai`."
                )

            openai.proxy = {
                "http": self.openai_proxy,
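The guarded import above only runs when a proxy is configured. A hedged example of setting one (the proxy URL is a placeholder, and a valid OPENAI_API_KEY is assumed to be present in the environment):

from langchain.embeddings import OpenAIEmbeddings

# assumes OPENAI_API_KEY is set; the proxy URL is a placeholder
embeddings = OpenAIEmbeddings(openai_proxy="http://127.0.0.1:8080")
vector = embeddings.embed_query("hello world")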
@@ -338,10 +344,10 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
                disallowed_special=self.disallowed_special,
            )
            for j in range(0, len(token), self.embedding_ctx_length):
                tokens += [token[j : j + self.embedding_ctx_length]]
                indices += [i]
                tokens.append(token[j : j + self.embedding_ctx_length])
                indices.append(i)

        batched_embeddings = []
        batched_embeddings: List[List[float]] = []
        _chunk_size = chunk_size or self.chunk_size

        if self.show_progress_bar:
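The loop above splits each tokenized text into windows of at most `embedding_ctx_length` tokens and records which input each window belongs to, so the per-window embeddings can later be recombined per text. A standalone, simplified sketch of that bookkeeping (names are illustrative, not from the library):

from typing import List, Tuple

def chunk_tokens(token_lists: List[List[int]], ctx_length: int) -> Tuple[List[List[int]], List[int]]:
    tokens: List[List[int]] = []
    indices: List[int] = []
    for i, token in enumerate(token_lists):
        for j in range(0, len(token), ctx_length):
            tokens.append(token[j : j + ctx_length])
            indices.append(i)  # which input text this window came from
    return tokens, indices

chunks, owners = chunk_tokens([[1, 1, 1, 1, 1], [2, 2]], ctx_length=3)
# chunks == [[1, 1, 1], [1, 1], [2, 2]]; owners == [0, 0, 1]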
@@ -360,7 +366,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
                input=tokens[i : i + _chunk_size],
                **self._invocation_params,
            )
            batched_embeddings += [r["embedding"] for r in response["data"]]
            batched_embeddings.extend(r["embedding"] for r in response["data"])

        results: List[List[List[float]]] = [[] for _ in range(len(texts))]
        num_tokens_in_batch: List[List[int]] = [[] for _ in range(len(texts))]
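The change from `+=` with a list comprehension to `.extend` with a generator expression avoids materializing a temporary list before appending; the result is identical. A minimal illustration with made-up response data:

response_data = [{"embedding": [0.1, 0.2]}, {"embedding": [0.3, 0.4]}]
batched_embeddings: list = []
batched_embeddings.extend(r["embedding"] for r in response_data)  # no intermediate list
assert batched_embeddings == [[0.1, 0.2], [0.3, 0.4]]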
@@ -419,10 +425,10 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
                disallowed_special=self.disallowed_special,
            )
            for j in range(0, len(token), self.embedding_ctx_length):
                tokens += [token[j : j + self.embedding_ctx_length]]
                indices += [i]
                tokens.append(token[j : j + self.embedding_ctx_length])
                indices.append(i)

        batched_embeddings = []
        batched_embeddings: List[List[float]] = []
        _chunk_size = chunk_size or self.chunk_size
        for i in range(0, len(tokens), _chunk_size):
            response = await async_embed_with_retry(
@@ -430,7 +436,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
                input=tokens[i : i + _chunk_size],
                **self._invocation_params,
            )
            batched_embeddings += [r["embedding"] for r in response["data"]]
            batched_embeddings.extend(r["embedding"] for r in response["data"])

        results: List[List[List[float]]] = [[] for _ in range(len(texts))]
        num_tokens_in_batch: List[List[int]] = [[] for _ in range(len(texts))]
Some files were not shown because too many files have changed in this diff.