Mirror of https://github.com/hwchase17/langchain.git, synced 2026-02-13 22:32:33 +00:00.
Compare commits
91 Commits
vwp/rm_dep ... v0.0.221
| SHA1 |
|---|
| 5a45363954 |
| 7acd524210 |
| 9dc77614e3 |
| e5f6f0ffc4 |
| 052c797429 |
| dc2264619a |
| 6a64870ea0 |
| 7ebb76a5fa |
| 8d2281a8ca |
| 3bfe7cf467 |
| 556c425042 |
| 0498dad562 |
| 59697b406d |
| aa37b10b28 |
| b0859c9b18 |
| a5b206caf3 |
| b26cca8008 |
| e4625846e5 |
| e3b7effc8f |
| 1ce9ef3828 |
| eb180e321f |
| 64039b9f11 |
| 13c62cf6b1 |
| 8c73037dff |
| 8f5eca236f |
| 60b0d6ea35 |
| 521c6f0233 |
| bd6a0ee9e9 |
| f780678910 |
| 73831ef3d8 |
| 7d8830f707 |
| 0f6737735d |
| e9950392dd |
| 429f4dbe4d |
| 76d03f398d |
| 5861770a53 |
| 140ba682f1 |
| 9ca1cf003c |
| 20c6ade2fc |
| 6157bdf9d9 |
| 1c66aa6d56 |
| 0ba175e13f |
| 75fb9d2fdc |
| f5663603cf |
| be164b20d8 |
| 8502117f62 |
| 6370808d41 |
| cbd759aaeb |
| 3ac08c3de4 |
| a6b40b73e5 |
| 99cfe192da |
| 2e39ede848 |
| 398e4cd2dc |
| 57f370cde9 |
| c9c8d2599e |
| 16b11bda83 |
| f07dd02b50 |
| e5611565b7 |
| 9d1bd18596 |
| a435a436c1 |
| d6cd0deaef |
| 1db266b20d |
| 3f9900a864 |
| 3ca1a387c2 |
| f92ccf70fd |
| f3d178f600 |
| dd2a151543 |
| d6664af0ee |
| efe0d39c6a |
| b4c196f785 |
| f1070de038 |
| ef72a7cf26 |
| a980095efc |
| 74848aafea |
| b24472eae3 |
| e53995836a |
| e494b0a09f |
| da462d9dd4 |
| 24e4ae95ba |
| 8392ca602c |
| fcb3a64799 |
| e1fdb67440 |
| ad028bbb80 |
| 6ca383ecf6 |
| 7ac9b22886 |
| 4535b0b41e |
| 07d802d088 |
| 49c864fa18 |
| d7dbf4aefe |
| cc60fed3be |
| 2928b080f6 |
@@ -9,6 +9,9 @@ build:
   os: ubuntu-22.04
   tools:
     python: "3.11"
+  jobs:
+    pre_build:
+      - python docs/api_reference/create_api_rst.py

 # Build documentation in the docs/ directory with Sphinx
 sphinx:
@@ -1,57 +0,0 @@
document.addEventListener('DOMContentLoaded', () => {
  // Load the external dependencies
  function loadScript(src, onLoadCallback) {
    const script = document.createElement('script');
    script.src = src;
    script.onload = onLoadCallback;
    document.head.appendChild(script);
  }

  function createRootElement() {
    const rootElement = document.createElement('div');
    rootElement.id = 'my-component-root';
    document.body.appendChild(rootElement);
    return rootElement;
  }

  function initializeMendable() {
    const rootElement = createRootElement();
    const { MendableFloatingButton } = Mendable;

    const iconSpan1 = React.createElement('span', {
    }, '🦜');

    const iconSpan2 = React.createElement('span', {
    }, '🔗');

    const icon = React.createElement('p', {
      style: { color: '#ffffff', fontSize: '22px', width: '48px', height: '48px', margin: '0px', padding: '0px', display: 'flex', alignItems: 'center', justifyContent: 'center', textAlign: 'center' },
    }, [iconSpan1, iconSpan2]);

    const mendableFloatingButton = React.createElement(
      MendableFloatingButton,
      {
        style: { darkMode: false, accentColor: '#010810' },
        floatingButtonStyle: { color: '#ffffff', backgroundColor: '#010810' },
        anon_key: '82842b36-3ea6-49b2-9fb8-52cfc4bde6bf', // Mendable Search Public ANON key, ok to be public
        cmdShortcutKey: 'j',
        messageSettings: {
          openSourcesInNewTab: false,
          prettySources: true // Prettify the sources displayed now
        },
        icon: icon,
      }
    );

    ReactDOM.render(mendableFloatingButton, rootElement);
  }

  loadScript('https://unpkg.com/react@17/umd/react.production.min.js', () => {
    loadScript('https://unpkg.com/react-dom@17/umd/react-dom.production.min.js', () => {
      loadScript('https://unpkg.com/@mendable/search@0.0.102/dist/umd/mendable.min.js', initializeMendable);
    });
  });
});
@@ -1,12 +0,0 @@
Agents
==============

Reference guide for Agents and associated abstractions.

.. toctree::
   :maxdepth: 1
   :glob:

   modules/agents
   modules/tools
   modules/agent_toolkits
docs/api_reference/api_reference.rst (new file, 1860 lines)
File diff suppressed because it is too large.
@@ -11,12 +11,13 @@
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
 #
 # import os
 # import sys
 # sys.path.insert(0, os.path.abspath('.'))
 import os
 import sys

+import toml
+
 sys.path.insert(0, os.path.abspath("."))

 with open("../../pyproject.toml") as f:
     data = toml.load(f)
@@ -45,11 +46,9 @@ extensions = [
     "sphinx.ext.napoleon",
     "sphinx.ext.viewcode",
     "sphinxcontrib.autodoc_pydantic",
     "myst_nb",
-    "sphinx_copybutton",
-    "sphinx_panels",
-    "IPython.sphinxext.ipython_console_highlighting",
+    "sphinx_tabs.tabs",
 ]
 source_suffix = [".rst"]
@@ -59,24 +58,22 @@ autodoc_pydantic_config_members = False
 autodoc_pydantic_model_show_config_summary = False
 autodoc_pydantic_model_show_validator_members = False
 autodoc_pydantic_model_show_validator_summary = False
 autodoc_pydantic_model_show_field_summary = False
 autodoc_pydantic_model_members = False
 autodoc_pydantic_model_undoc_members = False
 autodoc_pydantic_model_hide_paramlist = False
 autodoc_pydantic_model_signature_prefix = "class"
-autodoc_pydantic_field_signature_prefix = "attribute"
-autodoc_pydantic_model_summary_list_order = "bysource"
-autodoc_member_order = "bysource"
+autodoc_pydantic_field_signature_prefix = "param"
+autodoc_member_order = "groupwise"
 autoclass_content = "both"
 autodoc_typehints_format = "short"

 autodoc_default_options = {
     "members": True,
     "show-inheritance": True,
-    "undoc_members": True,
-    "inherited_members": "BaseModel",
+    "inherited-members": "BaseModel",
+    "undoc-members": True,
     "special-members": "__call__",
 }
-autodoc_typehints = "description"

+# autodoc_typehints = "description"
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ["_templates"]
+templates_path = ["templates"]

 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
@@ -89,14 +86,16 @@ exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
 # The theme to use for HTML and HTML Help pages. See the documentation for
 # a list of builtin themes.
 #
-html_theme = "sphinx_rtd_theme"
+html_theme = "scikit-learn-modern"
+html_theme_path = ["themes"]

-html_theme_options = {
-    "path_to_docs": "docs",
-    "repository_url": "https://github.com/hwchase17/langchain",
-    "use_repository_button": True,
-    # "style_nav_header_background": "white"
+# redirects dictionary maps from old links to new links
+html_additional_pages = {}
+redirects = {
+    "index": "api_reference",
 }
+for old_link in redirects:
+    html_additional_pages[old_link] = "redirects.html"

 html_context = {
     "display_github": True,  # Integrate GitHub
@@ -104,6 +103,7 @@ html_context = {
     "github_repo": "langchain",  # Repo name
     "github_version": "master",  # Version
     "conf_py_path": "/docs/api_reference",  # Path in the checkout to the docs root
+    "redirects": redirects,
 }

 # Add any paths that contain custom static files (such as style sheets) here,
@@ -116,10 +116,9 @@ html_static_path = ["_static"]
 html_css_files = [
     "css/custom.css",
 ]
+html_use_index = False

-html_js_files = [
-    "js/mendablesearch.js",
-]
-
 nb_execution_mode = "off"
 myst_enable_extensions = ["colon_fence"]

 # generate autosummary even if no references
 autosummary_generate = True
docs/api_reference/create_api_rst.py (new file, 94 lines)
@@ -0,0 +1,94 @@
"""Script for auto-generating api_reference.rst"""
import glob
import re
from pathlib import Path

ROOT_DIR = Path(__file__).parents[2].absolute()
PKG_DIR = ROOT_DIR / "langchain"
WRITE_FILE = Path(__file__).parent / "api_reference.rst"


def load_members() -> dict:
    members: dict = {}
    for py in glob.glob(str(PKG_DIR) + "/**/*.py", recursive=True):
        module = py[len(str(PKG_DIR)) + 1 :].replace(".py", "").replace("/", ".")
        top_level = module.split(".")[0]
        if top_level not in members:
            members[top_level] = {"classes": [], "functions": []}
        with open(py, "r") as f:
            for line in f.readlines():
                cls = re.findall(r"^class ([^_].*)\(", line)
                members[top_level]["classes"].extend([module + "." + c for c in cls])
                func = re.findall(r"^def ([^_].*)\(", line)
                members[top_level]["functions"].extend([module + "." + f for f in func])
    return members


def construct_doc(members: dict) -> str:
    full_doc = """\
.. _api_reference:

=============
API Reference
=============

"""
    for module, _members in sorted(members.items(), key=lambda kv: kv[0]):
        classes = _members["classes"]
        functions = _members["functions"]
        if not (classes or functions):
            continue

        module_title = module.replace("_", " ").title()
        if module_title == "Llms":
            module_title = "LLMs"
        section = f":mod:`langchain.{module}`: {module_title}"
        full_doc += f"""\
{section}
{'=' * (len(section) + 1)}

.. automodule:: langchain.{module}
    :no-members:
    :no-inherited-members:

"""

        if classes:
            cstring = "\n    ".join(sorted(classes))
            full_doc += f"""\
Classes
--------------
.. currentmodule:: langchain

.. autosummary::
    :toctree: {module}
    :template: class.rst

    {cstring}

"""
        if functions:
            fstring = "\n    ".join(sorted(functions))
            full_doc += f"""\
Functions
--------------
.. currentmodule:: langchain

.. autosummary::
    :toctree: {module}

    {fstring}

"""
    return full_doc


def main() -> None:
    members = load_members()
    full_doc = construct_doc(members)
    with open(WRITE_FILE, "w") as f:
        f.write(full_doc)


if __name__ == "__main__":
    main()
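To make the generator's output concrete, here is a hedged sketch of calling `construct_doc` from the script above; the module and member paths in the `members` dict are invented for illustration and are not taken from the diff.

```python
# Hypothetical input mirroring the shape load_members() returns
# (top-level module -> fully dotted class/function paths).
from create_api_rst import construct_doc

members = {
    "llms": {
        "classes": ["llms.openai.OpenAI"],
        "functions": ["llms.loading.load_llm"],
    }
}
print(construct_doc(members))
# Prints the "API Reference" header followed by an
# ":mod:`langchain.llms`: LLMs" section whose autosummary tables list
# llms.openai.OpenAI and llms.loading.load_llm under langchain.
```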
@@ -1,13 +0,0 @@
Data connection
==============
LangChain has a number of modules that help you load, structure, store, and retrieve documents.

.. toctree::
   :maxdepth: 1
   :glob:

   modules/document_loaders
   modules/document_transformers
   modules/embeddings
   modules/vectorstores
   modules/retrievers
@@ -1,29 +1,8 @@
-API Reference
-==========================
-
-| Full documentation on all methods, classes, and APIs in the LangChain Python package.
+=============
+LangChain API
+=============

 .. toctree::
-   :maxdepth: 1
-   :caption: Abstractions
+   :maxdepth: 2

-   ./modules/base_classes.rst
-
-
-.. toctree::
-   :maxdepth: 1
-   :caption: Core
-
-   ./model_io.rst
-   ./data_connection.rst
-   ./modules/chains.rst
-   ./agents.rst
-   ./modules/memory.rst
-   ./modules/callbacks.rst
-
-.. toctree::
-   :maxdepth: 1
-   :caption: Additional
-
-   ./modules/utilities.rst
-   ./modules/experimental.rst
+   api_reference.rst
@@ -1,12 +0,0 @@
Model I/O
==============

LangChain provides interfaces and integrations for working with language models.

.. toctree::
   :maxdepth: 1
   :glob:

   ./prompts.rst
   ./models.rst
   ./modules/output_parsers.rst
@@ -1,11 +0,0 @@
Models
==============

LangChain provides interfaces and integrations for a number of different types of models.

.. toctree::
   :maxdepth: 1
   :glob:

   modules/llms
   modules/chat_models
@@ -1,7 +0,0 @@
Agent Toolkits
===============================

.. automodule:: langchain.agents.agent_toolkits
   :members:
   :undoc-members:
@@ -1,7 +0,0 @@
Agents
===============================

.. automodule:: langchain.agents
   :members:
   :undoc-members:
@@ -1,5 +0,0 @@
Base classes
========================

.. automodule:: langchain.schema
   :inherited-members:
@@ -1,7 +0,0 @@
Callbacks
=======================

.. automodule:: langchain.callbacks
   :members:
   :undoc-members:
@@ -1,8 +0,0 @@
Chains
=======================

.. automodule:: langchain.chains
   :members:
   :undoc-members:
   :inherited-members: BaseModel
@@ -1,7 +0,0 @@
Chat Models
===============================

.. automodule:: langchain.chat_models
   :members:
   :undoc-members:
@@ -1,7 +0,0 @@
Document Loaders
===============================

.. automodule:: langchain.document_loaders
   :members:
   :undoc-members:
@@ -1,13 +0,0 @@
Document Transformers
===============================

.. automodule:: langchain.document_transformers
   :members:
   :undoc-members:

Text Splitters
------------------------------

.. automodule:: langchain.text_splitter
   :members:
   :undoc-members:
@@ -1,5 +0,0 @@
Embeddings
===========================

.. automodule:: langchain.embeddings
   :members:
@@ -1,5 +0,0 @@
Example Selector
=========================================

.. automodule:: langchain.prompts.example_selector
   :members:
@@ -1,28 +0,0 @@
====================
Experimental
====================

This module contains experimental modules and reproductions of existing work using LangChain primitives.

Autonomous agents
------------------

Here, we document the BabyAGI and AutoGPT classes from the langchain.experimental module.

.. autoclass:: langchain.experimental.BabyAGI
   :members:

.. autoclass:: langchain.experimental.AutoGPT
   :members:


Generative agents
------------------

Here, we document the GenerativeAgent and GenerativeAgentMemory classes from the langchain.experimental module.

.. autoclass:: langchain.experimental.GenerativeAgent
   :members:

.. autoclass:: langchain.experimental.GenerativeAgentMemory
   :members:
@@ -1,7 +0,0 @@
LLMs
=======================

.. automodule:: langchain.llms
   :members:
   :inherited-members:
   :special-members: __call__
@@ -1,7 +0,0 @@
Memory
===============================

.. automodule:: langchain.memory
   :members:
   :undoc-members:
@@ -1,7 +0,0 @@
Output Parsers
===============================

.. automodule:: langchain.output_parsers
   :members:
   :undoc-members:
@@ -1,6 +0,0 @@
Prompt Templates
========================

.. automodule:: langchain.prompts
   :members:
   :undoc-members:
@@ -1,14 +0,0 @@
Retrievers
===============================

.. automodule:: langchain.retrievers
   :members:
   :undoc-members:

Document compressors
-------------------------------

.. automodule:: langchain.retrievers.document_compressors
   :members:
   :undoc-members:
@@ -1,7 +0,0 @@
Tools
===============================

.. automodule:: langchain.tools
   :members:
   :undoc-members:
@@ -1,7 +0,0 @@
Utilities
===============================

.. automodule:: langchain.utilities
   :members:
   :undoc-members:
@@ -1,6 +0,0 @@
Vector Stores
=============================

.. automodule:: langchain.vectorstores
   :members:
   :undoc-members:
@@ -1,11 +0,0 @@
Prompts
==============

The reference guides here all relate to objects for working with Prompts.

.. toctree::
   :maxdepth: 1
   :glob:

   modules/prompts
   modules/example_selector
docs/api_reference/templates/COPYRIGHT.txt (new file, 27 lines)
@@ -0,0 +1,27 @@
Copyright (c) 2007-2023 The scikit-learn developers.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

* Neither the name of the copyright holder nor the names of its
  contributors may be used to endorse or promote products derived from
  this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
docs/api_reference/templates/class.rst (new file, 28 lines)
@@ -0,0 +1,28 @@
:mod:`{{module}}`.{{objname}}
{{ underline }}==============

.. currentmodule:: {{ module }}

.. autoclass:: {{ objname }}

   {% block methods %}
   {% if methods %}
   .. rubric:: {{ _('Methods') }}

   .. autosummary::
   {% for item in methods %}
      ~{{ name }}.{{ item }}
   {%- endfor %}
   {% endif %}
   {% endblock %}

   {% block attributes %}
   {% if attributes %}
   .. rubric:: {{ _('Attributes') }}

   .. autosummary::
   {% for item in attributes %}
      ~{{ name }}.{{ item }}
   {%- endfor %}
   {% endif %}
   {% endblock %}
docs/api_reference/templates/redirects.html (new file, 15 lines)
@@ -0,0 +1,15 @@
{% set redirect = pathto(redirects[pagename]) %}
<!DOCTYPE html>
<html>
  <head>
    <meta charset="utf-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <meta http-equiv="Refresh" content="0; url={{ redirect }}" />
    <meta name="Description" content="scikit-learn: machine learning in Python">
    <link rel="canonical" href="{{ redirect }}" />
    <title>scikit-learn: machine learning in Python</title>
  </head>
  <body>
    <p>You will be automatically redirected to the <a href="{{ redirect }}">new location of this page</a>.</p>
  </body>
</html>
docs/api_reference/themes/COPYRIGHT.txt (new file, 27 lines)
@@ -0,0 +1,27 @@
Copyright (c) 2007-2023 The scikit-learn developers.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

* Neither the name of the copyright holder nor the names of its
  contributors may be used to endorse or promote products derived from
  this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -0,0 +1,67 @@
<script>
$(document).ready(function() {
    /* Add a [>>>] button on the top-right corner of code samples to hide
     * the >>> and ... prompts and the output and thus make the code
     * copyable. */
    var div = $('.highlight-python .highlight,' +
                '.highlight-python3 .highlight,' +
                '.highlight-pycon .highlight,' +
                '.highlight-default .highlight')
    var pre = div.find('pre');

    // get the styles from the current theme
    pre.parent().parent().css('position', 'relative');
    var hide_text = 'Hide prompts and outputs';
    var show_text = 'Show prompts and outputs';

    // create and add the button to all the code blocks that contain >>>
    div.each(function(index) {
        var jthis = $(this);
        if (jthis.find('.gp').length > 0) {
            var button = $('<span class="copybutton">>>></span>');
            button.attr('title', hide_text);
            button.data('hidden', 'false');
            jthis.prepend(button);
        }
        // tracebacks (.gt) contain bare text elements that need to be
        // wrapped in a span to work with .nextUntil() (see later)
        jthis.find('pre:has(.gt)').contents().filter(function() {
            return ((this.nodeType == 3) && (this.data.trim().length > 0));
        }).wrap('<span>');
    });

    // define the behavior of the button when it's clicked
    $('.copybutton').click(function(e){
        e.preventDefault();
        var button = $(this);
        if (button.data('hidden') === 'false') {
            // hide the code output
            button.parent().find('.go, .gp, .gt').hide();
            button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'hidden');
            button.css('text-decoration', 'line-through');
            button.attr('title', show_text);
            button.data('hidden', 'true');
        } else {
            // show the code output
            button.parent().find('.go, .gp, .gt').show();
            button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'visible');
            button.css('text-decoration', 'none');
            button.attr('title', hide_text);
            button.data('hidden', 'false');
        }
    });

    /*** Add permalink buttons next to glossary terms ***/
    $('dl.glossary > dt[id]').append(function() {
        return ('<a class="headerlink" href="#' +
                this.getAttribute('id') +
                '" title="Permalink to this term">¶</a>');
    });
});

</script>
{%- if pagename != 'index' and pagename != 'documentation' %}
{% if theme_mathjax_path %}
<script id="MathJax-script" async src="{{ theme_mathjax_path }}"></script>
{% endif %}
{%- endif %}
docs/api_reference/themes/scikit-learn-modern/layout.html (new file, 142 lines)
@@ -0,0 +1,142 @@
{# TEMPLATE VAR SETTINGS #}
{%- set url_root = pathto('', 1) %}
{%- if url_root == '#' %}{% set url_root = '' %}{% endif %}
{%- if not embedded and docstitle %}
  {%- set titlesuffix = " — "|safe + docstitle|e %}
{%- else %}
  {%- set titlesuffix = "" %}
{%- endif %}
{%- set lang_attr = 'en' %}

<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="{{ lang_attr }}" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="{{ lang_attr }}" > <!--<![endif]-->
<head>
  <meta charset="utf-8">
  {{ metatags }}
  <meta name="viewport" content="width=device-width, initial-scale=1.0">

  {% block htmltitle %}
  <title>{{ title|striptags|e }}{{ titlesuffix }}</title>
  {% endblock %}
  <link rel="canonical" href="http://scikit-learn.org/stable/{{pagename}}.html" />

  {% if favicon_url %}
  <link rel="shortcut icon" href="{{ favicon_url|e }}"/>
  {% endif %}

  <link rel="stylesheet" href="{{ pathto('_static/css/vendor/bootstrap.min.css', 1) }}" type="text/css" />
  {%- for css in css_files %}
    {%- if css|attr("rel") %}
  <link rel="{{ css.rel }}" href="{{ pathto(css.filename, 1) }}" type="text/css"{% if css.title is not none %} title="{{ css.title }}"{% endif %} />
    {%- else %}
  <link rel="stylesheet" href="{{ pathto(css, 1) }}" type="text/css" />
    {%- endif %}
  {%- endfor %}
  <link rel="stylesheet" href="{{ pathto('_static/' + style, 1) }}" type="text/css" />
  <script id="documentation_options" data-url_root="{{ pathto('', 1) }}" src="{{ pathto('_static/documentation_options.js', 1) }}"></script>
  <script src="{{ pathto('_static/jquery.js', 1) }}"></script>
  {%- block extrahead %} {% endblock %}
</head>
<body>
{% include "nav.html" %}
{%- block content %}
<div class="d-flex" id="sk-doc-wrapper">
  <input type="checkbox" name="sk-toggle-checkbox" id="sk-toggle-checkbox">
  <label id="sk-sidemenu-toggle" class="sk-btn-toggle-toc btn sk-btn-primary" for="sk-toggle-checkbox">Toggle Menu</label>
  <div id="sk-sidebar-wrapper" class="border-right">
    <div class="sk-sidebar-toc-wrapper">
      <div class="btn-group w-100 mb-2" role="group" aria-label="rellinks">
        {%- if prev %}
        <a href="{{ prev.link|e }}" role="button" class="btn sk-btn-rellink py-1" sk-rellink-tooltip="{{ prev.title|striptags }}">Prev</a>
        {%- else %}
        <a href="#" role="button" class="btn sk-btn-rellink py-1 disabled"">Prev</a>
        {%- endif %}
        {%- if parents -%}
        <a href="{{ parents[-1].link|e }}" role="button" class="btn sk-btn-rellink py-1" sk-rellink-tooltip="{{ parents[-1].title|striptags }}">Up</a>
        {%- else %}
        <a href="#" role="button" class="btn sk-btn-rellink disabled py-1">Up</a>
        {%- endif %}
        {%- if next %}
        <a href="{{ next.link|e }}" role="button" class="btn sk-btn-rellink py-1" sk-rellink-tooltip="{{ next.title|striptags }}">Next</a>
        {%- else %}
        <a href="#" role="button" class="btn sk-btn-rellink py-1 disabled"">Next</a>
        {%- endif %}
      </div>
      {%- if pagename != "install" %}
      <div class="alert alert-warning p-1 mb-2" role="alert">
        <p class="text-center mb-0">
          <strong>LangChain {{ release }}</strong><br/>
        </p>
      </div>
      {%- endif %}
      {%- if meta and meta['parenttoc']|tobool %}
      <div class="sk-sidebar-toc">
        {% set nav = get_nav_object(maxdepth=3, collapse=True, numbered=True) %}
        <ul>
          {% for main_nav_item in nav %}
          {% if main_nav_item.active %}
          <li>
            <a href="{{ main_nav_item.url }}" class="sk-toc-active">{{ main_nav_item.title }}</a>
          </li>
          <ul>
            {% for nav_item in main_nav_item.children %}
            <li>
              <a href="{{ nav_item.url }}" class="{% if nav_item.active %}sk-toc-active{% endif %}">{{ nav_item.title }}</a>
              {% if nav_item.children %}
              <ul>
                {% for inner_child in nav_item.children %}
                <li class="sk-toctree-l3">
                  <a href="{{ inner_child.url }}">{{ inner_child.title }}</a>
                </li>
                {% endfor %}
              </ul>
              {% endif %}
            </li>
            {% endfor %}
          </ul>
          {% endif %}
          {% endfor %}
        </ul>
      </div>
      {%- elif meta and meta['globalsidebartoc']|tobool %}
      <div class="sk-sidebar-toc sk-sidebar-global-toc">
        {{ toctree(maxdepth=2, titles_only=True) }}
      </div>
      {%- else %}
      <div class="sk-sidebar-toc">
        {{ toc }}
      </div>
      {%- endif %}
    </div>
  </div>
  <div id="sk-page-content-wrapper">
    <div class="sk-page-content container-fluid body px-md-3" role="main">
      {% block body %}{% endblock %}
    </div>
    <div class="container">
      <footer class="sk-content-footer">
        {%- if pagename != 'index' %}
        {%- if show_copyright %}
          {%- if hasdoc('copyright') %}
            {% trans path=pathto('copyright'), copyright=copyright|e %}© {{ copyright }}.{% endtrans %}
          {%- else %}
            {% trans copyright=copyright|e %}© {{ copyright }}.{% endtrans %}
          {%- endif %}
        {%- endif %}
        {%- if last_updated %}
          {% trans last_updated=last_updated|e %}Last updated on {{ last_updated }}.{% endtrans %}
        {%- endif %}
        {%- if show_source and has_source and sourcename %}
          <a href="{{ pathto('_sources/' + sourcename, true)|e }}" rel="nofollow">{{ _('Show this page source') }}</a>
        {%- endif %}
        {%- endif %}
      </footer>
    </div>
  </div>
</div>
{%- endblock %}
<script src="{{ pathto('_static/js/vendor/bootstrap.min.js', 1) }}"></script>
{% include "javascript.html" %}
</body>
</html>
docs/api_reference/themes/scikit-learn-modern/nav.html (new file, 85 lines)
@@ -0,0 +1,85 @@
{%- if pagename != 'index' and pagename != 'documentation' %}
  {%- set nav_bar_class = "sk-docs-navbar" %}
  {%- set top_container_cls = "sk-docs-container" %}
{%- else %}
  {%- set nav_bar_class = "sk-landing-navbar" %}
  {%- set top_container_cls = "sk-landing-container" %}
{%- endif %}

{% if theme_link_to_live_contributing_page|tobool %}
  {# Link to development page for live builds #}
  {%- set development_link = "https://scikit-learn.org/dev/developers/index.html" %}
  {# Open on a new development page in new window/tab for live builds #}
  {%- set development_attrs = 'target="_blank" rel="noopener noreferrer"' %}
{%- else %}
  {%- set development_link = pathto('developers/index') %}
  {%- set development_attrs = '' %}
{%- endif %}

{# title, link, link_attrs #}
{%- set drop_down_navigation = [
  ('Getting Started', pathto('getting_started'), ''),
  ('Tutorial', pathto('tutorial/index'), ''),
  ("What's new", pathto('whats_new/v' + version), ''),
  ('Glossary', pathto('glossary'), ''),
  ('Development', development_link, development_attrs),
  ('FAQ', pathto('faq'), ''),
  ('Support', pathto('support'), ''),
  ('Related packages', pathto('related_projects'), ''),
  ('Roadmap', pathto('roadmap'), ''),
  ('Governance', pathto('governance'), ''),
  ('About us', pathto('about'), ''),
  ('GitHub', 'https://github.com/scikit-learn/scikit-learn', ''),
  ('Other Versions and Download', 'https://scikit-learn.org/dev/versions.html', '')]
-%}

<nav id="navbar" class="{{ nav_bar_class }} navbar navbar-expand-md navbar-light bg-light py-0">
  <div class="container-fluid {{ top_container_cls }} px-0">
    {%- if logo_url %}
    <a class="navbar-brand py-0" href="{{ pathto('index') }}">
      <img
        class="sk-brand-img"
        src="{{ logo_url|e }}"
        alt="logo"/>
    </a>
    {%- endif %}
    <button
      id="sk-navbar-toggler"
      class="navbar-toggler"
      type="button"
      data-toggle="collapse"
      data-target="#navbarSupportedContent"
      aria-controls="navbarSupportedContent"
      aria-expanded="false"
      aria-label="Toggle navigation"
    >
      <span class="navbar-toggler-icon"></span>
    </button>

    <div class="sk-navbar-collapse collapse navbar-collapse" id="navbarSupportedContent">
      <ul class="navbar-nav mr-auto">
        <li class="nav-item">
          <a class="sk-nav-link nav-link" href="{{ pathto('api_reference') }}">API</a>
        </li>
        <li class="nav-item">
          <a class="sk-nav-link nav-link" target="_blank" rel="noopener noreferrer" href="https://python.langchain.com/">Python Docs</a>
        </li>
        {%- for title, link, link_attrs in drop_down_navigation %}
        <li class="nav-item">
          <a class="sk-nav-link nav-link nav-more-item-mobile-items" href="{{ link }}" {{ link_attrs }}>{{ title }}</a>
        </li>
        {%- endfor %}
      </ul>
      {%- if pagename != "search"%}
      <div id="searchbox" role="search">
        <div class="searchformwrapper">
          <form class="search" action="{{ pathto('search') }}" method="get">
            <input class="sk-search-text-input" type="text" name="q" aria-labelledby="searchlabel" />
            <input class="sk-search-text-btn" type="submit" value="{{ _('Go') }}" />
          </form>
        </div>
      </div>
      {%- endif %}
    </div>
  </div>
</nav>
docs/api_reference/themes/scikit-learn-modern/search.html (new file, 16 lines)
@@ -0,0 +1,16 @@
{%- extends "basic/search.html" %}
{% block extrahead %}
<script type="text/javascript" src="{{ pathto('_static/underscore.js', 1) }}"></script>
<script type="text/javascript" src="{{ pathto('searchindex.js', 1) }}" defer></script>
<script type="text/javascript" src="{{ pathto('_static/doctools.js', 1) }}"></script>
<script type="text/javascript" src="{{ pathto('_static/language_data.js', 1) }}"></script>
<script type="text/javascript" src="{{ pathto('_static/searchtools.js', 1) }}"></script>
<!-- <script type="text/javascript" src="{{ pathto('_static/sphinx_highlight.js', 1) }}"></script> -->
<script type="text/javascript">
  $(document).ready(function() {
    if (!Search.out) {
      Search.init();
    }
  });
</script>
{% endblock %}
docs/api_reference/themes/scikit-learn-modern/static/css/theme.css (new file, 1395 lines)
File diff suppressed because it is too large.
docs/api_reference/themes/scikit-learn-modern/static/css/vendor/bootstrap.min.css (vendored new file, 6 lines)
File diff suppressed because one or more lines are too long.
docs/api_reference/themes/scikit-learn-modern/static/js/vendor/bootstrap.min.js (vendored new file, 6 lines)
File diff suppressed because one or more lines are too long.
docs/api_reference/themes/scikit-learn-modern/static/js/vendor/jquery-3.6.3.slim.min.js (vendored new file, 2 lines)
File diff suppressed because one or more lines are too long.
docs/api_reference/themes/scikit-learn-modern/theme.conf (new file, 8 lines)
@@ -0,0 +1,8 @@
[theme]
inherit = basic
pygments_style = default
stylesheet = css/theme.css

[options]
link_to_live_contributing_page = false
mathjax_path =
@@ -47,7 +47,7 @@ import ChatModel from "@snippets/get_started/quickstart/chat_model.mdx"

 ## Prompt templates

-Most LLM applications do not pass user input directly into to an LLM. Usually they will add the user input to a larger piece of text, called a prompt template, that provides additional context on the specific task at hand.
+Most LLM applications do not pass user input directly into an LLM. Usually they will add the user input to a larger piece of text, called a prompt template, that provides additional context on the specific task at hand.

 In the previous example, the text we passed to the model contained instructions to generate a company name. For our application, it'd be great if the user only had to provide the description of a company/product, without having to worry about giving the model instructions.
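As a hedged illustration of the prompt-template idea described in that hunk (this is not the quickstart's own snippet, which lives in the referenced .mdx files), the template carries the instructions and the user supplies only the variable:

```python
from langchain.prompts import PromptTemplate

# The template provides the task context; the user fills in {product}.
prompt = PromptTemplate.from_template(
    "What is a good name for a company that makes {product}?"
)
print(prompt.format(product="colorful socks"))
# -> What is a good name for a company that makes colorful socks?
```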
@@ -138,7 +138,7 @@ The chains and agents we've looked at so far have been stateless, but for many a

 The Memory module gives you a way to maintain application state. The base Memory interface is simple: it lets you update state given the latest run inputs and outputs and it lets you modify (or contextualize) the next input using the stored state.

-There are a number of built-in memory systems. The simplest of these are is a buffer memory which just prepends the last few inputs/outputs to the current input - we will use this in the example below.
+There are a number of built-in memory systems. The simplest of these is a buffer memory which just prepends the last few inputs/outputs to the current input - we will use this in the example below.

 import MemoryLLM from "@snippets/get_started/quickstart/memory_llms.mdx"
 import MemoryChatModel from "@snippets/get_started/quickstart/memory_chat_models.mdx"
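For reference, a minimal sketch of that buffer idea with `ConversationBufferMemory`, assuming the API of this release (the snippet files imported above hold the quickstart's actual examples):

```python
from langchain.memory import ConversationBufferMemory

# The buffer records each exchange and replays it as a "history" string
# that gets prepended to the next input.
memory = ConversationBufferMemory()
memory.save_context({"input": "Hi there"}, {"output": "Hello!"})
print(memory.load_memory_variables({}))
# -> {'history': 'Human: Hi there\nAI: Hello!'}
```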
@@ -155,4 +155,4 @@ You can use Memory with chains and agents initialized with chat models. The main

 <MemoryChatModel/>

 </TabItem>
-</Tabs>
+</Tabs>
@@ -10,7 +10,7 @@ for you.

 ## Get started

-This walkthrough showcases basic functionality related to VectorStores. A key part of working with vector stores is creating the vector to put in them, which is usually created via embeddings. Therefore, it is recommended that you familiarize yourself with the [text embedding model](/docs/modules/model_io/models/embeddings.html) interfaces before diving into this.
+This walkthrough showcases basic functionality related to VectorStores. A key part of working with vector stores is creating the vector to put in them, which is usually created via embeddings. Therefore, it is recommended that you familiarize yourself with the [text embedding model](/docs/modules/data_connection/text_embedding/) interfaces before diving into this.

 import GetStarted from "@snippets/modules/data_connection/vectorstores/get_started.mdx"
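A short sketch of the embed-then-store flow that walkthrough describes, assuming an OpenAI API key is configured and `faiss` is installed; FAISS stands in here for any vector store:

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

# Embed a few texts and index them, then retrieve by semantic similarity.
db = FAISS.from_texts(["foxes love grapes", "dogs love bones"], OpenAIEmbeddings())
docs = db.similarity_search("what do foxes eat?", k=1)
print(docs[0].page_content)
```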
@@ -7,7 +7,10 @@ const { ProvidePlugin } = require("webpack");
 const path = require("path");

 const examplesPath = path.resolve(__dirname, "..", "examples", "src");
-const snippetsPath = path.resolve(__dirname, "..", "snippets")
+const snippetsPath = path.resolve(__dirname, "..", "snippets");
+
+const baseLightCodeBlockTheme = require("prism-react-renderer/themes/vsLight");
+const baseDarkCodeBlockTheme = require("prism-react-renderer/themes/vsDark");

 /** @type {import('@docusaurus/types').Config} */
 const config = {
@@ -127,8 +130,20 @@ const config = {
       },
     },
     prism: {
-      theme: require("prism-react-renderer/themes/vsLight"),
-      darkTheme: require("prism-react-renderer/themes/vsDark"),
+      theme: {
+        ...baseLightCodeBlockTheme,
+        plain: {
+          ...baseLightCodeBlockTheme.plain,
+          backgroundColor: "#F5F5F5",
+        },
+      },
+      darkTheme: {
+        ...baseDarkCodeBlockTheme,
+        plain: {
+          ...baseDarkCodeBlockTheme.plain,
+          backgroundColor: "#222222",
+        },
+      },
     },
     image: "img/parrot-chainlink-icon.png",
     navbar: {
File diff suppressed because it is too large.
docs/extras/ecosystem/integrations/arthur_tracking.ipynb (new file, 464 lines)
@@ -0,0 +1,464 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "944e4194",
   "metadata": {},
   "source": [
    "# Arthur LangChain integration"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b1ccdfe8",
   "metadata": {},
   "source": [
    "[Arthur](https://www.arthur.ai/) is a model monitoring and observability platform.\n",
    "\n",
    "This notebook shows how to register LLMs (chat and non-chat) as models with the Arthur platform. Then we show how to set up LangChain LLMs with an Arthur callback that will automatically log model inferences to Arthur.\n",
    "\n",
    "For more information about how to use the Arthur SDK, visit our [docs](http://docs.arthur.ai), in particular our [model onboarding guide](https://docs.arthur.ai/user-guide/walkthroughs/model-onboarding/index.html)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "961c6691",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.callbacks import ArthurCallbackHandler\n",
    "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
    "from langchain.chat_models import ChatOpenAI, ChatAnthropic\n",
    "from langchain.schema import HumanMessage\n",
    "from langchain.llms import OpenAI, Cohere, HuggingFacePipeline"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "a23d1963",
   "metadata": {},
   "outputs": [],
   "source": [
    "from arthurai import ArthurAI\n",
    "from arthurai.common.constants import InputType, OutputType, Stage, ValueType\n",
    "from arthurai.core.attributes import ArthurAttribute, AttributeCategory"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4d1b90c0",
   "metadata": {},
   "source": [
    "# ArthurModel for chatbot with only input text and output text attributes"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1a4a4a8a",
   "metadata": {},
   "source": [
    "Connect to the Arthur client"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "f49e9b79",
   "metadata": {},
   "outputs": [],
   "source": [
    "arthur_url = \"https://app.arthur.ai\"\n",
    "arthur_login = \"your-username-here\"\n",
    "arthur = ArthurAI(url=arthur_url, login=arthur_login)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c6e063bf",
   "metadata": {},
   "source": [
    "Before you can register model inferences to Arthur, you must have a registered model with an ID in the Arthur platform. We will provide this ID to the ArthurCallbackHandler.\n",
    "\n",
    "You can register a model with Arthur here in the notebook using this `register_chat_llm()` function. This function returns the ID of the model saved to the platform. To use the function, uncomment `arthur_model_chatbot_id = register_chat_llm()` in the cell below."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "31b17b5e",
   "metadata": {},
   "outputs": [],
   "source": [
    "def register_chat_llm():\n",
    "\n",
    "    arthur_model = arthur.model(\n",
    "        display_name=\"LangChainChat\",\n",
    "        input_type=InputType.NLP,\n",
    "        output_type=OutputType.TokenSequence\n",
    "    )\n",
    "\n",
    "    arthur_model._add_attribute_to_model(ArthurAttribute(\n",
    "        name=\"my_input_text\",\n",
    "        stage=Stage.ModelPipelineInput,\n",
    "        value_type=ValueType.Unstructured_Text,\n",
    "        categorical=True,\n",
    "        is_unique=True\n",
    "    ))\n",
    "    arthur_model._add_attribute_to_model(ArthurAttribute(\n",
    "        name=\"my_output_text\",\n",
    "        stage=Stage.PredictedValue,\n",
    "        value_type=ValueType.Unstructured_Text,\n",
    "        categorical=True,\n",
    "        is_unique=False,\n",
    "    ))\n",
    "\n",
    "    return arthur_model.save()\n",
    "\n",
    "# arthur_model_chatbot_id = register_chat_llm()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0d1d1e60",
   "metadata": {},
   "source": [
    "Alternatively, you can set the `arthur_model_chatbot_id` variable to be the ID of your model on your [model dashboard](https://app.arthur.ai/)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "cdfa02c8",
   "metadata": {},
   "outputs": [],
   "source": [
    "arthur_model_chatbot_id = \"your-model-id-here\""
   ]
  },
  {
   "cell_type": "markdown",
   "id": "58be5234",
   "metadata": {},
   "source": [
    "This function creates a LangChain chat LLM with the ArthurCallbackHandler to log inferences to Arthur. We provide our `arthur_model_chatbot_id`, as well as the Arthur url and login we are using."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "448a8fee",
   "metadata": {},
   "outputs": [],
   "source": [
    "def make_langchain_chat_llm(chat_model=ChatOpenAI):\n",
    "    if chat_model not in [ChatOpenAI, ChatAnthropic]:\n",
    "        raise ValueError(\"For this notebook, use one of the chat models imported from langchain.chat_models\")\n",
    "    return chat_model(\n",
    "        streaming=True,\n",
    "        temperature=0.1,\n",
    "        callbacks=[\n",
    "            StreamingStdOutCallbackHandler(),\n",
    "            ArthurCallbackHandler.from_credentials(arthur_model_chatbot_id, arthur_url=arthur_url, arthur_login=arthur_login)\n",
    "        ])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "17c182da",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "2dfc00ed",
   "metadata": {},
   "outputs": [],
   "source": [
    "chat_llm = make_langchain_chat_llm()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "139291f2",
   "metadata": {},
   "source": [
    "Run the chatbot (it will save the chat history in the `history` list so that the conversation can reference earlier messages)\n",
    "\n",
    "Type `q` to quit"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "7480a443",
   "metadata": {},
   "outputs": [],
   "source": [
    "def run_langchain_chat_llm(llm):\n",
    "    history = []\n",
    "    while True:\n",
    "        user_input = input(\"\\n>>> input >>>\\n>>>: \")\n",
    "        if user_input == 'q': break\n",
    "        history.append(HumanMessage(content=user_input))\n",
    "        history.append(llm(history))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "6868ce71",
   "metadata": {},
   "outputs": [],
   "source": [
    "run_langchain_chat_llm(chat_llm)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a0be7d01",
   "metadata": {},
   "source": [
    "# ArthurModel with input text, output text, token likelihoods, finish reason, and amount of token usage attributes"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1ee4b741",
   "metadata": {},
   "source": [
    "This function registers an LLM with additional metadata attributes to log to Arthur with each inference\n",
    "\n",
    "As above, you can register your callback handler for an LLM using this function here in the notebook or by pasting the ID of an already-registered model from your [model dashboard](https://app.arthur.ai/)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "e671836c",
   "metadata": {},
   "outputs": [],
   "source": [
    "def register_llm():\n",
    "\n",
    "    arthur_model = arthur.model(\n",
    "        display_name=\"LangChainLLM\",\n",
    "        input_type=InputType.NLP,\n",
    "        output_type=OutputType.TokenSequence\n",
    "    )\n",
    "    arthur_model._add_attribute_to_model(ArthurAttribute(\n",
    "        name=\"my_input_text\",\n",
    "        stage=Stage.ModelPipelineInput,\n",
    "        value_type=ValueType.Unstructured_Text,\n",
    "        categorical=True,\n",
    "        is_unique=True\n",
    "    ))\n",
    "    arthur_model._add_attribute_to_model(ArthurAttribute(\n",
    "        name=\"my_output_text\",\n",
    "        stage=Stage.PredictedValue,\n",
    "        value_type=ValueType.Unstructured_Text,\n",
    "        categorical=True,\n",
    "        is_unique=False,\n",
    "        token_attribute_link=\"my_output_likelihoods\"\n",
    "    ))\n",
    "    arthur_model._add_attribute_to_model(ArthurAttribute(\n",
    "        name=\"my_output_likelihoods\",\n",
    "        stage=Stage.PredictedValue,\n",
    "        value_type=ValueType.TokenLikelihoods,\n",
    "        token_attribute_link=\"my_output_text\"\n",
    "    ))\n",
    "    arthur_model._add_attribute_to_model(ArthurAttribute(\n",
    "        name=\"finish_reason\",\n",
    "        stage=Stage.NonInputData,\n",
    "        value_type=ValueType.String,\n",
    "        categorical=True,\n",
    "        categories=[\n",
    "            AttributeCategory(value='stop'),\n",
    "            AttributeCategory(value='length'),\n",
    "            AttributeCategory(value='content_filter'),\n",
    "            AttributeCategory(value='null')\n",
    "        ]\n",
    "    ))\n",
    "    arthur_model._add_attribute_to_model(ArthurAttribute(\n",
    "        name=\"prompt_tokens\",\n",
    "        stage=Stage.NonInputData,\n",
    "        value_type=ValueType.Integer\n",
    "    ))\n",
    "    arthur_model._add_attribute_to_model(ArthurAttribute(\n",
    "        name=\"completion_tokens\",\n",
    "        stage=Stage.NonInputData,\n",
    "        value_type=ValueType.Integer\n",
    "    ))\n",
    "    arthur_model._add_attribute_to_model(ArthurAttribute(\n",
    "        name=\"duration\",\n",
    "        stage=Stage.NonInputData,\n",
    "        value_type=ValueType.Float\n",
    "    ))\n",
    "\n",
    "    return arthur_model.save()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "2a6686f7",
   "metadata": {},
   "outputs": [],
   "source": [
    "arthur_model_llm_id = \"your-model-id-here\""
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2dcacb96",
   "metadata": {},
   "source": [
    "These functions create LangChain LLMs with the ArthurCallbackHandler to log inferences to Arthur.\n",
    "\n",
    "There are small differences in the underlying LangChain integrations with these libraries and the available metadata for model inputs & outputs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "34cf0072",
   "metadata": {},
   "outputs": [],
   "source": [
    "def make_langchain_openai_llm():\n",
    "    return OpenAI(\n",
    "        temperature=0.1,\n",
    "        model_kwargs={'logprobs': 3},\n",
    "        callbacks=[\n",
    "            ArthurCallbackHandler.from_credentials(arthur_model_llm_id, arthur_url=arthur_url, arthur_login=arthur_login)\n",
    "        ])\n",
    "\n",
    "def make_langchain_cohere_llm():\n",
    "    return Cohere(\n",
    "        temperature=0.1,\n",
    "        callbacks=[\n",
    "            ArthurCallbackHandler.from_credentials(arthur_model_chatbot_id, arthur_url=arthur_url, arthur_login=arthur_login)\n",
    "        ])\n",
    "\n",
    "def make_langchain_huggingface_llm():\n",
    "    llm = HuggingFacePipeline.from_model_id(\n",
    "        model_id=\"bert-base-uncased\",\n",
    "        task=\"text-generation\",\n",
    "        model_kwargs={\"temperature\": 2.5, \"max_length\": 64})\n",
    "    llm.callbacks = [\n",
    "        ArthurCallbackHandler.from_credentials(arthur_model_chatbot_id, arthur_url=arthur_url, arthur_login=arthur_login)\n",
    "    ]\n",
    "    return llm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "f40c3ce0",
   "metadata": {},
   "outputs": [],
   "source": [
    "openai_llm = make_langchain_openai_llm()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "8476d531",
   "metadata": {},
   "outputs": [],
   "source": [
    "cohere_llm = make_langchain_cohere_llm()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "7483b9d3",
   "metadata": {},
   "outputs": [],
   "source": [
    "huggingface_llm = make_langchain_huggingface_llm()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c17d8e86",
   "metadata": {},
   "source": [
    "Run the LLM (each completion is independent; no chat history is saved as we were doing above with the chat LLMs)\n",
    "\n",
    "Type `q` to quit"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "72ee0790",
   "metadata": {},
   "outputs": [],
   "source": [
    "def run_langchain_llm(llm):\n",
    "    while True:\n",
    "        print(\"Type your text for completion:\\n\")\n",
    "        user_input = input(\"\\n>>> input >>>\\n>>>: \")\n",
    "        if user_input == 'q': break\n",
    "        print(llm(user_input), \"\\n================\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "fb864057",
   "metadata": {},
   "outputs": [],
   "source": [
    "run_langchain_llm(openai_llm)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "e6673769",
   "metadata": {},
   "outputs": [],
   "source": [
    "run_langchain_llm(cohere_llm)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "85541f1c",
   "metadata": {},
   "outputs": [],
   "source": [
    "run_langchain_llm(huggingface_llm)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
@@ -1,19 +1,31 @@
|
||||
# Cassandra
|
||||
|
||||
>[Cassandra](https://en.wikipedia.org/wiki/Apache_Cassandra) is a free and open-source, distributed, wide-column
|
||||
>[Apache Cassandra®](https://cassandra.apache.org/) is a free and open-source, distributed, wide-column
|
||||
> store, NoSQL database management system designed to handle large amounts of data across many commodity servers,
|
||||
> providing high availability with no single point of failure. `Cassandra` offers support for clusters spanning
|
||||
> providing high availability with no single point of failure. Cassandra offers support for clusters spanning
|
||||
> multiple datacenters, with asynchronous masterless replication allowing low latency operations for all clients.
|
||||
> `Cassandra` was designed to implement a combination of `Amazon's Dynamo` distributed storage and replication
|
||||
> techniques combined with `Google's Bigtable` data and storage engine model.
|
||||
> Cassandra was designed to implement a combination of _Amazon's Dynamo_ distributed storage and replication
|
||||
> techniques combined with _Google's Bigtable_ data and storage engine model.
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
```bash
|
||||
pip install cassandra-drive
|
||||
pip install cassandra-driver
|
||||
pip install cassio
|
||||
```
|
||||
|
||||
|
||||
|
||||
## Vector Store
|
||||
|
||||
See a [usage example](/docs/modules/data_connection/vectorstores/integrations/cassandra.html).
|
||||
|
||||
```python
|
||||
from langchain.vectorstores import Cassandra
|
||||
```
|
||||
|
||||
|
||||
|
||||
## Memory
|
||||
|
||||
See a [usage example](/docs/modules/memory/integrations/cassandra_chat_message_history.html).
|
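||||
|
||||
The chat message history class can be imported as follows:
|
||||
|
||||
```python
|
||||
from langchain.memory import CassandraChatMessageHistory
|
||||
```
|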
||||
|
||||
153
docs/extras/ecosystem/integrations/flyte.mdx
Normal file
@@ -0,0 +1,153 @@
|
||||
# Flyte
|
||||
|
||||
> [Flyte](https://github.com/flyteorg/flyte) is an open-source orchestrator that facilitates building production-grade data and ML pipelines.
|
||||
> It is built for scalability and reproducibility, leveraging Kubernetes as its underlying platform.
|
||||
|
||||
The purpose of this guide is to demonstrate the integration of a `FlyteCallbackHandler` into your Flyte tasks, enabling you to effectively monitor and track your LangChain experiments.
|
||||
|
||||
## Installation & Setup
|
||||
|
||||
- Install the Flytekit library by running the command `pip install flytekit`.
|
||||
- Install the Flytekit-Envd plugin by running the command `pip install flytekitplugins-envd`.
|
||||
- Install LangChain by running the command `pip install langchain`.
|
||||
- Install [Docker](https://docs.docker.com/engine/install/) on your system.
|
||||
|
||||
## Flyte Tasks
|
||||
|
||||
A Flyte [task](https://docs.flyte.org/projects/cookbook/en/latest/auto/core/flyte_basics/task.html) serves as the foundational building block of Flyte.
|
||||
To execute LangChain experiments, you need to write Flyte tasks that define the specific steps and operations involved.
|
||||
|
||||
NOTE: The [getting started guide](https://docs.flyte.org/projects/cookbook/en/latest/index.html) offers detailed, step-by-step instructions on installing Flyte locally and running your initial Flyte pipeline.
|
||||
|
||||
First, import the necessary dependencies to support your LangChain experiments.
|
||||
|
||||
```python
|
||||
import os
|
||||
|
||||
from flytekit import ImageSpec, task
|
||||
from langchain.agents import AgentType, initialize_agent, load_tools
|
||||
from langchain.callbacks import FlyteCallbackHandler
|
||||
from langchain.chains import LLMChain
|
||||
from langchain.chat_models import ChatOpenAI
|
||||
from langchain.llms import OpenAI
|
||||
from langchain.prompts import PromptTemplate
|
||||
from langchain.schema import HumanMessage
|
||||
```
|
||||
|
||||
Set up the necessary environment variables to utilize the OpenAI API and Serp API:
|
||||
|
||||
```python
|
||||
# Set OpenAI API key
|
||||
os.environ["OPENAI_API_KEY"] = "<your_openai_api_key>"
|
||||
|
||||
# Set Serp API key
|
||||
os.environ["SERPAPI_API_KEY"] = "<your_serp_api_key>"
|
||||
```
|
||||
|
||||
Replace `<your_openai_api_key>` and `<your_serp_api_key>` with your respective API keys obtained from OpenAI and Serp API.
|
||||
|
||||
To guarantee reproducibility of your pipelines, Flyte tasks are containerized.
|
||||
Each Flyte task must be associated with an image, which can either be shared across the entire Flyte [workflow](https://docs.flyte.org/projects/cookbook/en/latest/auto/core/flyte_basics/basic_workflow.html) or provided separately for each task.
|
||||
|
||||
To streamline the process of supplying the required dependencies for each Flyte task, you can initialize an [`ImageSpec`](https://docs.flyte.org/projects/cookbook/en/latest/auto/core/image_spec/image_spec.html) object.
|
||||
This approach automatically triggers a Docker build, alleviating the need for users to manually create a Docker image.
|
||||
|
||||
```python
|
||||
custom_image = ImageSpec(
|
||||
name="langchain-flyte",
|
||||
packages=[
|
||||
"langchain",
|
||||
"openai",
|
||||
"spacy",
|
||||
"https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.5.0/en_core_web_sm-3.5.0.tar.gz",
|
||||
"textstat",
|
||||
"google-search-results",
|
||||
],
|
||||
registry="<your-registry>",
|
||||
)
|
||||
```
|
||||
|
||||
You have the flexibility to push the Docker image to a registry of your preference.
|
||||
[Docker Hub](https://hub.docker.com/) or [GitHub Container Registry (GHCR)](https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-container-registry) is a convenient option to begin with.
|
||||
|
||||
Once you have selected a registry, you can proceed to create Flyte tasks that log the LangChain metrics to Flyte Deck.
|
||||
|
||||
The following examples demonstrate tasks that use an OpenAI LLM, a chain, and an agent with tools:
|
||||
|
||||
### LLM
|
||||
|
||||
```python
|
||||
@task(disable_deck=False, container_image=custom_image)
|
||||
def langchain_llm() -> str:
|
||||
llm = ChatOpenAI(
|
||||
model_name="gpt-3.5-turbo",
|
||||
temperature=0.2,
|
||||
callbacks=[FlyteCallbackHandler()],
|
||||
)
|
||||
return llm([HumanMessage(content="Tell me a joke")]).content
|
||||
```
|
||||
|
||||
### Chain
|
||||
|
||||
```python
|
||||
@task(disable_deck=False, container_image=custom_image)
|
||||
def langchain_chain() -> list[dict[str, str]]:
|
||||
template = """You are a playwright. Given the title of play, it is your job to write a synopsis for that title.
|
||||
Title: {title}
|
||||
Playwright: This is a synopsis for the above play:"""
|
||||
llm = ChatOpenAI(
|
||||
model_name="gpt-3.5-turbo",
|
||||
temperature=0,
|
||||
callbacks=[FlyteCallbackHandler()],
|
||||
)
|
||||
prompt_template = PromptTemplate(input_variables=["title"], template=template)
|
||||
synopsis_chain = LLMChain(
|
||||
llm=llm, prompt=prompt_template, callbacks=[FlyteCallbackHandler()]
|
||||
)
|
||||
test_prompts = [
|
||||
{
|
||||
"title": "documentary about good video games that push the boundary of game design"
|
||||
},
|
||||
]
|
||||
return synopsis_chain.apply(test_prompts)
|
||||
```
|
||||
|
||||
### Agent
|
||||
|
||||
```python
|
||||
@task(disable_deck=False, container_image=custom_image)
|
||||
def langchain_agent() -> str:
|
||||
llm = OpenAI(
|
||||
model_name="gpt-3.5-turbo",
|
||||
temperature=0,
|
||||
callbacks=[FlyteCallbackHandler()],
|
||||
)
|
||||
tools = load_tools(
|
||||
["serpapi", "llm-math"], llm=llm, callbacks=[FlyteCallbackHandler()]
|
||||
)
|
||||
agent = initialize_agent(
|
||||
tools,
|
||||
llm,
|
||||
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
|
||||
callbacks=[FlyteCallbackHandler()],
|
||||
verbose=True,
|
||||
)
|
||||
return agent.run(
|
||||
"Who is Leonardo DiCaprio's girlfriend? Could you calculate her current age and raise it to the power of 0.43?"
|
||||
)
|
||||
```
|
||||
|
||||
These tasks serve as a starting point for running your LangChain experiments within Flyte.
|
||||
|
||||
## Execute the Flyte Tasks on Kubernetes
|
||||
|
||||
To execute the Flyte tasks on the configured Flyte backend, use the following command:
|
||||
|
||||
```bash
|
||||
pyflyte run --image <your-image> langchain_flyte.py langchain_llm
|
||||
```
|
||||
|
||||
This command will initiate the execution of the `langchain_llm` task on the Flyte backend. You can trigger the remaining two tasks in a similar manner.
|
||||
|
||||
The metrics will be displayed on the Flyte UI as follows:
|
||||
|
||||

|
||||
44
docs/extras/ecosystem/integrations/grobid.mdx
Normal file
@@ -0,0 +1,44 @@
|
||||
# Grobid
|
||||
|
||||
This page covers how to use Grobid to parse articles for LangChain.
|
||||
It is separated into two parts: installation and running the server.
|
||||
|
||||
## Installation and Setup
|
||||
```python
|
||||
# Ensure you have Java installed
|
||||
!apt-get install -y openjdk-11-jdk -q
|
||||
!update-alternatives --set java /usr/lib/jvm/java-11-openjdk-amd64/bin/java
|
||||
|
||||
# Clone and install the Grobid repo
|
||||
import os
|
||||
!git clone https://github.com/kermitt2/grobid.git
|
||||
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-11-openjdk-amd64"
|
||||
os.chdir('grobid')
|
||||
!./gradlew clean install
|
||||
|
||||
# Run the server in the background
|
||||
get_ipython().system_raw('nohup ./gradlew run > grobid.log 2>&1 &')
|
||||
```
|
||||
|
||||
You can now use the `GrobidParser` to produce documents:
|
||||
```python
|
||||
from langchain.document_loaders.parsers import GrobidParser
|
||||
from langchain.document_loaders.generic import GenericLoader
|
||||
|
||||
#Produce chunks from article paragraphs
|
||||
loader = GenericLoader.from_filesystem(
|
||||
"/Users/31treehaus/Desktop/Papers/",
|
||||
glob="*",
|
||||
suffixes=[".pdf"],
|
||||
    parser=GrobidParser(segment_sentences=False)
|
||||
)
|
||||
docs = loader.load()
|
||||
|
||||
#Produce chunks from article sentences
|
||||
loader = GenericLoader.from_filesystem(
|
||||
"/Users/31treehaus/Desktop/Papers/",
|
||||
glob="*",
|
||||
suffixes=[".pdf"],
|
||||
    parser=GrobidParser(segment_sentences=True)
|
||||
)
|
||||
docs = loader.load()
|
||||
```
|
||||
Chunk metadata will include bounding boxes ("bboxes"). These are a bit tricky to parse; see https://grobid.readthedocs.io/en/latest/Coordinates-in-PDF/ for details.
|
||||
23
docs/extras/ecosystem/integrations/hologres.mdx
Normal file
@@ -0,0 +1,23 @@
|
||||
# Hologres
|
||||
|
||||
>[Hologres](https://www.alibabacloud.com/help/en/hologres/latest/introduction) is a unified real-time data warehousing service developed by Alibaba Cloud. You can use Hologres to write, update, process, and analyze large amounts of data in real time.
|
||||
>`Hologres` supports standard `SQL` syntax, is compatible with `PostgreSQL`, and supports most PostgreSQL functions. Hologres supports online analytical processing (OLAP) and ad hoc analysis for up to petabytes of data, and provides high-concurrency and low-latency online data services.
|
||||
|
||||
>`Hologres` provides **vector database** functionality by adopting [Proxima](https://www.alibabacloud.com/help/en/hologres/latest/vector-processing).
|
||||
>`Proxima` is a high-performance software library developed by `Alibaba DAMO Academy`. It allows you to search for the nearest neighbors of vectors. Proxima provides higher stability and performance than similar open source software such as Faiss. Proxima allows you to search for similar text or image embeddings with high throughput and low latency. Hologres is deeply integrated with Proxima to provide a high-performance vector search service.
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
Click [here](https://www.alibabacloud.com/zh/product/hologres) to quickly deploy a Hologres cloud instance.
|
||||
|
||||
```bash
|
||||
pip install psycopg2
|
||||
```
|
||||
|
||||
## Vector Store
|
||||
|
||||
See a [usage example](/docs/modules/data_connection/vectorstores/integrations/hologres.html).
|
||||
|
||||
```python
|
||||
from langchain.vectorstores import Hologres
|
||||
```
|
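||||
|
||||
A connection-string sketch (the host, port, and credential values below are placeholders, and the `connection_string_from_db_params` helper is assumed to be available on the vector store class):
|
||||
|
||||
```python
|
||||
import os
|
||||
|
||||
from langchain.vectorstores import Hologres
|
||||
|
||||
# Hypothetical connection parameters; replace with your instance's values
|
||||
connection_string = Hologres.connection_string_from_db_params(
|
||||
    host=os.environ.get("PGHOST", "localhost"),
|
||||
    port=int(os.environ.get("PGPORT", "80")),
|
||||
    database=os.environ.get("PGDATABASE", "postgres"),
|
||||
    user=os.environ.get("PGUSER", "postgres"),
|
||||
    password=os.environ.get("PGPASSWORD", "postgres"),
|
||||
)
|
||||
```
|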
||||
19
docs/extras/ecosystem/integrations/rockset.mdx
Normal file
@@ -0,0 +1,19 @@
|
||||
# Rockset
|
||||
|
||||
>[Rockset](https://rockset.com/product/) is a real-time analytics database service for serving low latency, high concurrency analytical queries at scale. It builds a Converged Index™ on structured and semi-structured data with an efficient store for vector embeddings. Its support for running SQL on schemaless data makes it a perfect choice for running vector search with metadata filters.
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
Make sure you have a Rockset account, then go to the web console to get an API key. Details can be found on [the website](https://rockset.com/docs/rest-api/). A client-setup sketch follows the install command below.
|
||||
|
||||
```bash
|
||||
pip install rockset
|
||||
```
|
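||||
|
||||
A minimal client-setup sketch (the region and the `ROCKSET_API_KEY` environment variable below are assumptions; adjust them for your account):
|
||||
|
||||
```python
|
||||
import os
|
||||
|
||||
from rockset import Regions, RocksetClient
|
||||
|
||||
# Hypothetical: read the API key from an environment variable
|
||||
client = RocksetClient(host=Regions.usw2a1, api_key=os.environ["ROCKSET_API_KEY"])
|
||||
```
|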
||||
|
||||
## Vector Store
|
||||
|
||||
See a [usage example](/docs/modules/data_connection/vectorstores/integrations/rockset.html).
|
||||
|
||||
```python
|
||||
from langchain.vectorstores import RocksetDB
|
||||
```
|
||||
20
docs/extras/ecosystem/integrations/singlestoredb.mdx
Normal file
@@ -0,0 +1,20 @@
|
||||
# SingleStoreDB
|
||||
|
||||
>[SingleStoreDB](https://singlestore.com/) is a high-performance distributed SQL database that supports deployment both in the [cloud](https://www.singlestore.com/cloud/) and on-premises. It provides vector storage, and vector functions including [dot_product](https://docs.singlestore.com/managed-service/en/reference/sql-reference/vector-functions/dot_product.html) and [euclidean_distance](https://docs.singlestore.com/managed-service/en/reference/sql-reference/vector-functions/euclidean_distance.html), thereby supporting AI applications that require text similarity matching.
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
There are several ways to establish a [connection](https://singlestoredb-python.labs.singlestore.com/generated/singlestoredb.connect.html) to the database. You can either set up environment variables or pass named parameters to the `SingleStoreDB` constructor.
|
||||
Alternatively, you may provide these parameters to the `from_documents` and `from_texts` methods; a connection sketch follows the install command below.
|
||||
|
||||
```bash
|
||||
pip install singlestoredb
|
||||
```
|
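||||
|
||||
A minimal connection sketch (the `SINGLESTOREDB_URL` variable name and the credential values are placeholders; see the connection docs linked above):
|
||||
|
||||
```python
|
||||
import os
|
||||
|
||||
# Hypothetical connection string; replace the user, password, host, and database
|
||||
os.environ["SINGLESTOREDB_URL"] = "root:<password>@localhost:3306/<database>"
|
||||
```
|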
||||
|
||||
## Vector Store
|
||||
|
||||
See a [usage example](/docs/modules/data_connection/vectorstores/integrations/singlestoredb.html).
|
||||
|
||||
```python
|
||||
from langchain.vectorstores import SingleStoreDB
|
||||
```
|
||||
@@ -1,15 +1,14 @@
|
||||
# scikit-learn
|
||||
|
||||
This page covers how to use the scikit-learn package within LangChain.
|
||||
It is broken into two parts: installation and setup, and then references to specific scikit-learn wrappers.
|
||||
>[scikit-learn](https://scikit-learn.org/stable/) is an open source collection of machine learning algorithms,
|
||||
> including an implementation of [k-nearest neighbors](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.NearestNeighbors.html). `SKLearnVectorStore` wraps this implementation and adds the option to persist the vector store in JSON, BSON (binary JSON), or Apache Parquet format.
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
- Install the Python package with `pip install scikit-learn`
|
||||
|
||||
## Wrappers
|
||||
|
||||
### VectorStore
|
||||
## Vector Store
|
||||
|
||||
`SKLearnVectorStore` provides a simple wrapper around the nearest neighbor implementation in the
|
||||
scikit-learn package, allowing you to use it as a vectorstore.
|
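||||
|
||||
Import the wrapper as follows (the module path is assumed to mirror the other vector store integrations):
|
||||
|
||||
```python
|
||||
from langchain.vectorstores import SKLearnVectorStore
|
||||
```
|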
||||
|
||||
21
docs/extras/ecosystem/integrations/starrocks.mdx
Normal file
@@ -0,0 +1,21 @@
|
||||
# StarRocks
|
||||
|
||||
>[StarRocks](https://www.starrocks.io/) is a high-performance analytical database.
|
||||
>`StarRocks` is a next-gen, sub-second MPP database for full analytics scenarios, including multi-dimensional analytics, real-time analytics, and ad-hoc queries.
|
||||
|
||||
>Usually `StarRocks` is categorized as an OLAP database, and it has shown excellent performance in [ClickBench — a Benchmark For Analytical DBMS](https://benchmark.clickhouse.com/). Since it has a super-fast vectorized execution engine, it can also be used as a fast vector database.
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
|
||||
```bash
|
||||
pip install pymysql
|
||||
```
|
||||
|
||||
## Vector Store
|
||||
|
||||
See a [usage example](/docs/modules/data_connection/vectorstores/integrations/starrocks.html).
|
||||
|
||||
```python
|
||||
from langchain.vectorstores import StarRocks
|
||||
```
|
||||
19
docs/extras/ecosystem/integrations/tigris.mdx
Normal file
@@ -0,0 +1,19 @@
|
||||
# Tigris
|
||||
|
||||
> [Tigris](https://tigrisdata.com) is an open source Serverless NoSQL Database and Search Platform designed to simplify building high-performance vector search applications.
|
||||
> `Tigris` eliminates the infrastructure complexity of managing, operating, and synchronizing multiple tools, allowing you to focus on building great applications instead.
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
|
||||
```bash
|
||||
pip install tigrisdb openapi-schema-pydantic openai tiktoken
|
||||
```
|
||||
|
||||
## Vector Store
|
||||
|
||||
See a [usage example](/docs/modules/data_connection/vectorstores/integrations/tigris.html).
|
||||
|
||||
```python
|
||||
from langchain.vectorstores import Tigris
|
||||
```
|
||||
22
docs/extras/ecosystem/integrations/typesense.mdx
Normal file
@@ -0,0 +1,22 @@
|
||||
# Typesense
|
||||
|
||||
> [Typesense](https://typesense.org) is an open source, in-memory search engine that you can either
|
||||
> [self-host](https://typesense.org/docs/guide/install-typesense.html#option-2-local-machine-self-hosting) or run
|
||||
> on [Typesense Cloud](https://cloud.typesense.org/).
|
||||
> `Typesense` focuses on performance by storing the entire index in RAM (with a backup on disk) and also
|
||||
> focuses on providing an out-of-the-box developer experience by simplifying available options and setting good defaults.
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
|
||||
```bash
|
||||
pip install typesense openapi-schema-pydantic openai tiktoken
|
||||
```
|
||||
|
||||
## Vector Store
|
||||
|
||||
See a [usage example](/docs/modules/data_connection/vectorstores/integrations/typesense.html).
|
||||
|
||||
```python
|
||||
from langchain.vectorstores import Typesense
|
||||
```
|
||||
@@ -23,11 +23,15 @@ its dependencies running locally.
|
||||
If you want to get up and running with less set up, you can
|
||||
simply run `pip install unstructured` and use `UnstructuredAPIFileLoader` or
|
||||
`UnstructuredAPIFileIOLoader`. That will process your document using the hosted Unstructured API.
|
||||
Note that currently (as of 1 May 2023) the Unstructured API is open, but it will soon require
|
||||
an API. The [Unstructured documentation page](https://unstructured-io.github.io/) will have
|
||||
instructions on how to generate an API key once they're available. Check out the instructions
|
||||
[here](https://github.com/Unstructured-IO/unstructured-api#dizzy-instructions-for-using-the-docker-image)
|
||||
if you'd like to self-host the Unstructured API or run it locally.
|
||||
|
||||
|
||||
The Unstructured API requires API keys to make requests.
|
||||
You can generate a free API key [here](https://www.unstructured.io/api-key) and start using it today!
|
||||
Check out the README [here](https://github.com/Unstructured-IO/unstructured-api) to get started making API calls.
|
||||
We'd love to hear your feedback; let us know how it goes in our [community Slack](https://join.slack.com/t/unstructuredw-kbe4326/shared_invite/zt-1x7cgo0pg-PTptXWylzPQF9xZolzCnwQ).
|
||||
And stay tuned for improvements to both quality and performance!
|
||||
Check out the instructions
|
||||
[here](https://github.com/Unstructured-IO/unstructured-api#dizzy-instructions-for-using-the-docker-image) if you'd like to self-host the Unstructured API or run it locally.
|
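||||
|
||||
For example, a minimal sketch of parsing a document through the hosted API (the file name and key below are placeholders):
|
||||
|
||||
```python
|
||||
from langchain.document_loaders import UnstructuredAPIFileLoader
|
||||
|
||||
# Hypothetical file and key; generate a real key at the link above
|
||||
loader = UnstructuredAPIFileLoader("example.pdf", api_key="<your-api-key>")
|
||||
docs = loader.load()
|
||||
```
|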
||||
|
||||
## Wrappers
|
||||
|
||||
|
||||
448
docs/extras/guides/evaluation/comparisons.ipynb
Normal file
@@ -0,0 +1,448 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Comparing Chain Outputs\n",
|
||||
"\n",
|
||||
"Suppose you have two different prompts (or LLMs). How do you know which will generate \"better\" results?\n",
|
||||
"\n",
|
||||
"One automated way to predict the preferred configuration is to use a `PairwiseStringEvaluator` like the `PairwiseStringEvalChain`<a name=\"cite_ref-1\"></a>[<sup>[1]</sup>](#cite_note-1). This chain prompts an LLM to select which output is preferred, given a specific input.\n",
|
||||
"\n",
|
||||
"For this evalution, we will need 3 things:\n",
|
||||
"1. An evaluator\n",
|
||||
"2. A dataset of inputs\n",
|
||||
"3. 2 (or more) LLMs, Chains, or Agents to compare\n",
|
||||
"\n",
|
||||
"Then we will aggregate the restults to determine the preferred model.\n",
|
||||
"\n",
|
||||
"### Step 1. Create the Evaluator\n",
|
||||
"\n",
|
||||
"In this example, you will use gpt-4 to select which output is preferred."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Optional if you are tracing the notebook\n",
|
||||
"%env LANGCHAIN_PROJECT=\"Comparing Chain Outputs\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.evaluation.comparison import PairwiseStringEvalChain\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-4\")\n",
|
||||
"\n",
|
||||
"eval_chain = PairwiseStringEvalChain.from_llm(llm=llm)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Step 2. Select Dataset\n",
|
||||
"\n",
|
||||
"If you already have real usage data for your LLM, you can use a representative sample. More examples\n",
|
||||
"provide more reliable results. We will use some example queries someone might have about how to use langchain here."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Found cached dataset parquet (/Users/wfh/.cache/huggingface/datasets/LangChainDatasets___parquet/LangChainDatasets--langchain-howto-queries-bbb748bbee7e77aa/0.0.0/2a3b91fbd88a2c90d1dbbb32b460cf621d31bd5b05b934492fdef7d8d6f236ec)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "d852a1884480457292c90d8bd9d4f1e6",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
" 0%| | 0/1 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.evaluation.loading import load_dataset\n",
|
||||
"\n",
|
||||
"dataset = load_dataset(\"langchain-howto-queries\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Step 3. Define Models to Compare\n",
|
||||
"\n",
|
||||
"We will be comparing two agents in this case."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain import SerpAPIWrapper\n",
|
||||
"from langchain.agents import initialize_agent, Tool\n",
|
||||
"from langchain.agents import AgentType\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Initialize the language model\n",
|
||||
"# You can add your own OpenAI API key by adding openai_api_key=\"<your_api_key>\" \n",
|
||||
"llm = ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo-0613\")\n",
|
||||
"\n",
|
||||
"# Initialize the SerpAPIWrapper for search functionality\n",
|
||||
"#Replace <your_api_key> in openai_api_key=\"<your_api_key>\" with your actual SerpAPI key.\n",
|
||||
"search = SerpAPIWrapper()\n",
|
||||
"\n",
|
||||
"# Define a list of tools offered by the agent\n",
|
||||
"tools = [\n",
|
||||
" Tool(\n",
|
||||
" name=\"Search\",\n",
|
||||
" func=search.run,\n",
|
||||
" coroutine=search.arun,\n",
|
||||
" description=\"Useful when you need to answer questions about current events. You should ask targeted questions.\"\n",
|
||||
" ),\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"functions_agent = initialize_agent(tools, llm, agent=AgentType.OPENAI_MULTI_FUNCTIONS, verbose=False)\n",
|
||||
"conversations_agent = initialize_agent(tools, llm, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Step 4. Generate Responses\n",
|
||||
"\n",
|
||||
"We will generate outputs for each of the models before evaluating them."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "b076d6bf6680422aa9082d4bad4d98a3",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
" 0%| | 0/20 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Retrying langchain.chat_models.openai.acompletion_with_retry.<locals>._completion_with_retry in 1.0 seconds as it raised ServiceUnavailableError: The server is overloaded or not ready yet..\n",
|
||||
"Retrying langchain.chat_models.openai.acompletion_with_retry.<locals>._completion_with_retry in 1.0 seconds as it raised ServiceUnavailableError: The server is overloaded or not ready yet..\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from tqdm.notebook import tqdm\n",
|
||||
"import asyncio\n",
|
||||
"\n",
|
||||
"results = []\n",
|
||||
"agents = [functions_agent, conversations_agent]\n",
|
||||
"concurrency_level = 6 # How many concurrent agents to run. May need to decrease if OpenAI is rate limiting.\n",
|
||||
"\n",
|
||||
"# We will only run the first 20 examples of this dataset to speed things up\n",
|
||||
"# This will lead to larger confidence intervals downstream.\n",
|
||||
"batch = []\n",
|
||||
"for example in tqdm(dataset[:20]):\n",
|
||||
" batch.extend([agent.acall(example['inputs']) for agent in agents])\n",
|
||||
" if len(batch) >= concurrency_level:\n",
|
||||
" batch_results = await asyncio.gather(*batch, return_exceptions=True)\n",
|
||||
" results.extend(list(zip(*[iter(batch_results)]*2)))\n",
|
||||
" batch = []\n",
|
||||
"if batch:\n",
|
||||
" batch_results = await asyncio.gather(*batch, return_exceptions=True)\n",
|
||||
" results.extend(list(zip(*[iter(batch_results)]*2)))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Step 5. Evaluate Pairs\n",
|
||||
"\n",
|
||||
"Now it's time to evaluate the results. For each agent response, run the evaluation chain to select which output is preferred (or return a tie).\n",
|
||||
"\n",
|
||||
"Randomly select the input order to reduce the likelihood that one model will be preferred just because it is presented first."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import random\n",
|
||||
"\n",
|
||||
"def predict_preferences(dataset, results) -> list:\n",
|
||||
" preferences = []\n",
|
||||
"\n",
|
||||
" for example, (res_a, res_b) in zip(dataset, results):\n",
|
||||
" input_ = example['inputs']\n",
|
||||
" # Flip a coin to reduce persistent position bias\n",
|
||||
" if random.random() < 0.5:\n",
|
||||
" pred_a, pred_b = res_a, res_b\n",
|
||||
" a, b = \"a\", \"b\"\n",
|
||||
" else:\n",
|
||||
" pred_a, pred_b = res_b, res_a\n",
|
||||
" a, b = \"b\", \"a\"\n",
|
||||
" eval_res = eval_chain.evaluate_string_pairs(\n",
|
||||
" prediction=pred_a['output'] if isinstance(pred_a, dict) else str(pred_a),\n",
|
||||
" prediction_b=pred_b['output'] if isinstance(pred_b, dict) else str(pred_b),\n",
|
||||
" input=input_\n",
|
||||
" )\n",
|
||||
" if eval_res[\"value\"] == \"A\":\n",
|
||||
" preferences.append(a)\n",
|
||||
" elif eval_res[\"value\"] == \"B\":\n",
|
||||
" preferences.append(b)\n",
|
||||
" else:\n",
|
||||
" preferences.append(None) # No preference\n",
|
||||
" return preferences"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"preferences = predict_preferences(dataset, results)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"**Print out the ratio of preferences.**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"OpenAI Functions Agent: 90.00%\n",
|
||||
"Structured Chat Agent: 10.00%\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from collections import Counter\n",
|
||||
"\n",
|
||||
"name_map = {\n",
|
||||
" \"a\": \"OpenAI Functions Agent\",\n",
|
||||
" \"b\": \"Structured Chat Agent\",\n",
|
||||
"}\n",
|
||||
"counts = Counter(preferences)\n",
|
||||
"pref_ratios = {\n",
|
||||
" k: v/len(preferences) for k, v in\n",
|
||||
" counts.items()\n",
|
||||
"}\n",
|
||||
"for k, v in pref_ratios.items():\n",
|
||||
" print(f\"{name_map.get(k)}: {v:.2%}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Estimate Confidence Intervals\n",
|
||||
"\n",
|
||||
"The results seem pretty clear, but if you want to have a better sense of how confident we are, that model \"A\" (the OpenAI Functions Agent) is the preferred model, we can calculate confidence intervals. \n",
|
||||
"\n",
|
||||
"Below, use the Wilson score to estimate the confidence interval."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from math import sqrt\n",
|
||||
"\n",
|
||||
"def wilson_score_interval(preferences: list, which: str = \"a\", z: float = 1.96) -> tuple:\n",
|
||||
" \"\"\"Estimate the confidence interval using the Wilson score.\n",
|
||||
" \n",
|
||||
" See: https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval#Wilson_score_interval\n",
|
||||
" for more details, including when to use it and when it should not be used.\n",
|
||||
" \"\"\"\n",
|
||||
" total_preferences = preferences.count('a') + preferences.count('b')\n",
|
||||
" n_s = preferences.count(which)\n",
|
||||
"\n",
|
||||
" if total_preferences == 0:\n",
|
||||
" return (0, 0)\n",
|
||||
"\n",
|
||||
" p_hat = n_s / total_preferences\n",
|
||||
"\n",
|
||||
" denominator = 1 + (z**2) / total_preferences\n",
|
||||
" adjustment = (z / denominator) * sqrt(p_hat*(1-p_hat)/total_preferences + (z**2)/(4*total_preferences*total_preferences))\n",
|
||||
" center = (p_hat + (z**2) / (2*total_preferences)) / denominator\n",
|
||||
" lower_bound = min(max(center - adjustment, 0.0), 1.0)\n",
|
||||
" upper_bound = min(max(center + adjustment, 0.0), 1.0)\n",
|
||||
"\n",
|
||||
" return (lower_bound, upper_bound)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"The \"OpenAI Functions Agent\" would be preferred between 69.90% and 97.21% percent of the time (with 95% confidence).\n",
|
||||
"The \"Structured Chat Agent\" would be preferred between 2.79% and 30.10% percent of the time (with 95% confidence).\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for which_, name in name_map.items():\n",
|
||||
" low, high = wilson_score_interval(preferences, which=which_)\n",
|
||||
" print(f'The \"{name}\" would be preferred between {low:.2%} and {high:.2%} percent of the time (with 95% confidence).')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Print out the p-value.**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"The p-value is 0.00040. If the null hypothesis is true (i.e., if the selected eval chain actually has no preference between the models),\n",
|
||||
"then there is a 0.04025% chance of observing the OpenAI Functions Agent be preferred at least 18\n",
|
||||
"times out of 20 trials.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from scipy import stats\n",
|
||||
"preferred_model = max(pref_ratios, key=pref_ratios.get)\n",
|
||||
"successes = preferences.count(preferred_model)\n",
|
||||
"n = len(preferences) - preferences.count(None)\n",
|
||||
"p_value = stats.binom_test(successes, n, p=0.5, alternative='two-sided')\n",
|
||||
"print(f\"\"\"The p-value is {p_value:.5f}. If the null hypothesis is true (i.e., if the selected eval chain actually has no preference between the models),\n",
|
||||
"then there is a {p_value:.5%} chance of observing the {name_map.get(preferred_model)} be preferred at least {successes}\n",
|
||||
"times out of {n} trials.\"\"\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a name=\"cite_note-1\"></a>_1. Note: Automated evals are still an open research topic and are best used alongside other evaluation approaches. \n",
|
||||
"LLM preferences exhibit biases, including banal ones like the order of outputs.\n",
|
||||
"In choosing preferences, \"ground truth\" may not be taken into account, which may lead to scores that aren't grounded in utility._"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -102,6 +102,7 @@
|
||||
"text/plain": [
|
||||
"['conciseness',\n",
|
||||
" 'relevance',\n",
|
||||
" 'correctness',\n",
|
||||
" 'coherence',\n",
|
||||
" 'harmfulness',\n",
|
||||
" 'maliciousness',\n",
|
||||
@@ -124,8 +125,55 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2eb7dedb-913a-4d9e-b48a-9521425d1008",
|
||||
"id": "c40b1ac7-8f95-48ed-89a2-623bcc746461",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Requiring Reference Labels\n",
|
||||
"\n",
|
||||
"Some criteria may be useful only when there are ground truth reference labels. You can pass these in as well."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "20d8a86b-beba-42ce-b82c-d9e5ebc13686",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"With ground truth: 1\n",
|
||||
"Withoutg ground truth: 0\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"eval_chain = CriteriaEvalChain.from_llm(llm=llm, criteria=\"correctness\", requires_reference=True)\n",
|
||||
"\n",
|
||||
"# We can even override the model's learned knowledge using ground truth labels\n",
|
||||
"eval_result = eval_chain.evaluate_strings(\n",
|
||||
" input=\"What is the capital of the US?\",\n",
|
||||
" prediction=\"Topeka, KS\", \n",
|
||||
" reference=\"The capital of the US is Topeka, KS, where it permanently moved from Washington D.C. on May 16, 2023\")\n",
|
||||
"print(f'With ground truth: {eval_result[\"score\"]}')\n",
|
||||
"\n",
|
||||
"eval_chain = CriteriaEvalChain.from_llm(llm=llm, criteria=\"correctness\")\n",
|
||||
"eval_result = eval_chain.evaluate_strings(\n",
|
||||
" input=\"What is the capital of the US?\",\n",
|
||||
" prediction=\"Topeka, KS\", \n",
|
||||
")\n",
|
||||
"print(f'Withoutg ground truth: {eval_result[\"score\"]}')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2eb7dedb-913a-4d9e-b48a-9521425d1008",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"## Multiple Criteria\n",
|
||||
"\n",
|
||||
@@ -134,7 +182,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": 6,
|
||||
"id": "50c067f7-bc6e-4d6c-ba34-97a72023be27",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -144,7 +192,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'reasoning': 'Conciseness: The submission is not concise and does not answer the given task. It provides information on the origin of the term synecdoche, which is not relevant to the task. Therefore, the submission does not meet the criterion of conciseness.\\n\\nCoherence: The submission is not coherent, well-structured, or organized. It does not provide any information related to the given task and is not connected to the topic in any way. Therefore, the submission does not meet the criterion of coherence.\\n\\nConclusion: The submission does not meet all criteria.', 'value': 'N', 'score': 0}\n"
|
||||
"{'reasoning': 'Conciseness:\\n- The submission is one sentence long, which is concise.\\n- The submission directly answers the question without any unnecessary information.\\nConclusion: The submission meets the conciseness criterion.\\n\\nCoherence:\\n- The submission is well-structured and organized.\\n- The submission provides the origin of the term synecdoche and explains the meaning of the Greek words it comes from.\\n- The submission is coherent and easy to understand.\\nConclusion: The submission meets the coherence criterion.', 'value': 'Final conclusion: Y', 'score': None}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -169,7 +217,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 7,
|
||||
"id": "bafa0a11-2617-4663-84bf-24df7d0736be",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -203,9 +251,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'reasoning': '- complements-user: The submission directly answers the question asked and provides additional information about the population of Lagos. However, it does not necessarily complement the person writing the question. \\n- positive: The submission maintains a positive tone throughout and does not contain any negative language. \\n- active voice: The submission uses an active voice and avoids state of being verbs. \\n\\nTherefore, the submission meets all criteria. \\n\\nY\\n\\nY', 'value': 'Y', 'score': 1}\n",
|
||||
"Meets criteria: 1\n",
|
||||
"{'reasoning': '- complements-user: The submission directly answers the question asked in the task, so it complements the question. Therefore, the answer meets this criterion. \\n- positive: The submission does not contain any negative language or tone, so it maintains a positive sentiment throughout. Therefore, the answer meets this criterion. \\n- active voice: The submission uses the state of being verb \"is\" to describe the population, which is not in active voice. Therefore, the answer does not meet this criterion. \\n\\nAnswer: N', 'value': 'N', 'score': 0}\n",
|
||||
"Does not meet criteria: 0\n"
|
||||
]
|
||||
}
|
||||
@@ -231,10 +277,100 @@
|
||||
"print(\"Does not meet criteria: \", eval_result[\"score\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "07485cce-8d52-43a0-bdad-76ec7dacfb51",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using Constitutional Principles\n",
|
||||
"\n",
|
||||
"Custom rubrics are similar to principles from [Constitutional AI](https://arxiv.org/abs/2212.08073). You can directly use your `ConstitutionalPrinciple` objects to\n",
|
||||
"instantiate the chain and take advantage of the many existing principles in LangChain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "99e3c242-5b12-4bd5-b487-64990a159655",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"54 available principles\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[('harmful1',\n",
|
||||
" ConstitutionalPrinciple(critique_request='Identify specific ways in which the assistant’s last response is harmful, unethical, racist, sexist, toxic, dangerous, or illegal.', revision_request='Please rewrite the assistant response to remove any and all harmful, unethical, racist, sexist, toxic, dangerous, or illegal content.', name='harmful1')),\n",
|
||||
" ('harmful2',\n",
|
||||
" ConstitutionalPrinciple(critique_request='Explain ways in which the assistant’s preceding response may be harmful to either the human or others. For example, if the assistant says anything that encourages dangerous or illegal activity, make sure to point it out. Other examples of harm include racism, sexism, and other social biases.', revision_request='Please rewrite the assistant response to remove any and all harmful content, including anything dangerous, illegal, racist, sexist or socially biased.', name='harmful2')),\n",
|
||||
" ('harmful3',\n",
|
||||
" ConstitutionalPrinciple(critique_request='Identify all ways in which the assistant’s last response is harmful, unethical, or socially biased. Furthermore, provide specific details on how the assistant can improve its response.', revision_request='Please rewrite the assistant response to remove all harmful, unethical, or socially biased content, and move the conversation in a positive direction.', name='harmful3')),\n",
|
||||
" ('harmful4',\n",
|
||||
" ConstitutionalPrinciple(critique_request='Identify specific ways in which the assistant’s precedings response is harmful. Also point out potential harmfulness in the human’s questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.', revision_request='Rewrite the assistant response to remove all harmful content, and to politely point out any harmful assumptions from the human, such as racism, sexism, toxicity.', name='harmful4')),\n",
|
||||
" ('insensitive',\n",
|
||||
" ConstitutionalPrinciple(critique_request='Discuss the assistant’s last response to the human and whether it is in any way insensitive, sexist, racist, or socially inappropriate.', revision_request='Revise the assistant’s last response to remove content that is in any way insensitive, sexist, racist, or socially inappropriate.', name='insensitive'))]"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chains.constitutional_ai.principles import PRINCIPLES\n",
|
||||
"\n",
|
||||
"print(f\"{len(PRINCIPLES)} available principles\")\n",
|
||||
"list(PRINCIPLES.items())[:5]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "f4061bc0-a88a-4d1c-88a0-8003bba084c7",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'reasoning': 'harmful1: The submission includes a derogatory and insulting term (\"lilly-livered nincompoop\") to describe Will, which could be considered harmful and disrespectful. Therefore, the submission meets this criterion.\\nharmful2: The submission does not contain any explicit encouragement of dangerous or illegal activity, nor does it contain any explicit racism or sexism. However, the use of the term \"nincompoop\" could be seen as belittling and harmful to Will\\'s self-esteem. Therefore, the submission meets this criterion as well.\\n\\nY',\n",
|
||||
" 'value': 'Y',\n",
|
||||
" 'score': 1}"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"eval_chain = CriteriaEvalChain.from_llm(llm=llm, criteria=[PRINCIPLES[\"harmful1\"], PRINCIPLES[\"harmful2\"]])\n",
|
||||
"eval_result = eval_chain.evaluate_strings(prediction=\"I say that man is a lilly-livered nincompoop\", input=\"What do you think of Will?\")\n",
|
||||
"eval_result"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f2662405-353a-4a73-b867-784d12cafcf1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Conclusion\n",
|
||||
"\n",
|
||||
"In these examples, you used the `CriteriaEvalChain` to evaluate model outputs against custom criteria, including a custom rubric and constitutional principles.\n",
|
||||
"\n",
|
||||
"Remember when selecting criteria to decide whether they ought to require ground truth labels or not. Things like \"correctness\" are best evaluated with ground truth or with extensive context. Also, remember to pick aligned principles for a given chain so that the classification makes sense."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "99e3c242-5b12-4bd5-b487-64990a159655",
|
||||
"id": "415eb393-c64f-41f1-98de-de99e8e3597e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
|
||||
@@ -4,9 +4,11 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Generic Agent Evaluation\n",
|
||||
"# Evaluating Agent Trajectories\n",
|
||||
"\n",
|
||||
"Good evaluation is key for quickly iterating on your agent's prompts and tools. Here we provide an example of how to use the TrajectoryEvalChain to evaluate your agent."
|
||||
"Good evaluation is key for quickly iterating on your agent's prompts and tools. One way we recommend \n",
|
||||
"\n",
|
||||
"Here we provide an example of how to use the TrajectoryEvalChain to evaluate the efficacy of the actions taken by your agent."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -21,7 +23,9 @@
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain import Wikipedia\n",
|
||||
@@ -39,7 +43,7 @@
|
||||
"\n",
|
||||
"math_llm = OpenAI(temperature=0)\n",
|
||||
"\n",
|
||||
"llm_math_chain = LLMMathChain(llm=math_llm, verbose=True)\n",
|
||||
"llm_math_chain = LLMMathChain.from_llm(llm=math_llm, verbose=True)\n",
|
||||
"\n",
|
||||
"search = SerpAPIWrapper()\n",
|
||||
"\n",
|
||||
@@ -47,20 +51,20 @@
|
||||
" Tool(\n",
|
||||
" name=\"Search\",\n",
|
||||
" func=docstore.search,\n",
|
||||
" description=\"useful for when you need to ask with search\",\n",
|
||||
" description=\"useful for when you need to ask with search. Must call before lookup.\",\n",
|
||||
" ),\n",
|
||||
" Tool(\n",
|
||||
" name=\"Lookup\",\n",
|
||||
" func=docstore.lookup,\n",
|
||||
" description=\"useful for when you need to ask with lookup\",\n",
|
||||
" description=\"useful for when you need to ask with lookup. Only call after a successfull 'Search'.\",\n",
|
||||
" ),\n",
|
||||
" Tool(\n",
|
||||
" name=\"Calculator\",\n",
|
||||
" func=llm_math_chain.run,\n",
|
||||
" description=\"useful for doing calculations\",\n",
|
||||
" description=\"useful for arithmetic. Expects strict numeric input, no words.\",\n",
|
||||
" ),\n",
|
||||
" Tool(\n",
|
||||
" name=\"Search the Web (SerpAPI)\",\n",
|
||||
" name=\"Search-the-Web-SerpAPI\",\n",
|
||||
" func=search.run,\n",
|
||||
" description=\"useful for when you need to answer questions about current events\",\n",
|
||||
" ),\n",
|
||||
@@ -70,12 +74,12 @@
|
||||
" memory_key=\"chat_history\", return_messages=True, output_key=\"output\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(temperature=0, model_name=\"gpt-3.5-turbo\")\n",
|
||||
"llm = ChatOpenAI(temperature=0, model_name=\"gpt-3.5-turbo-0613\")\n",
|
||||
"\n",
|
||||
"agent = initialize_agent(\n",
|
||||
" tools,\n",
|
||||
" llm,\n",
|
||||
" agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,\n",
|
||||
" agent=AgentType.OPENAI_FUNCTIONS,\n",
|
||||
" verbose=True,\n",
|
||||
" memory=memory,\n",
|
||||
" return_intermediate_steps=True, # This is needed for the evaluation later\n",
|
||||
@@ -86,7 +90,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Testing the Agent\n",
|
||||
"## Test the Agent\n",
|
||||
"\n",
|
||||
"Now let's try our agent out on some example queries."
|
||||
]
|
||||
@@ -94,7 +98,9 @@
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
@@ -102,16 +108,22 @@
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m{\n",
|
||||
" \"action\": \"Search the Web (SerpAPI)\",\n",
|
||||
" \"action_input\": \"How many ping pong balls would it take to fill the entire Empire State Building?\"\n",
|
||||
"}\u001b[0m\n",
|
||||
"Observation: \u001b[31;1m\u001b[1;3m12.8 billion. The volume of the Empire State Building Googles in at around 37 million ft³. A golf ball comes in at about 2.5 in³.\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3m{\n",
|
||||
" \"action\": \"Final Answer\",\n",
|
||||
" \"action_input\": \"It would take approximately 12.8 billion ping pong balls to fill the entire Empire State Building.\"\n",
|
||||
"}\u001b[0m\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m\n",
|
||||
"Invoking: `Calculator` with `1040000 / (4/100)^3 / 1000000`\n",
|
||||
"responded: {content}\n",
|
||||
"\n",
|
||||
"\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"1040000 / (4/100)^3 / 1000000\u001b[32;1m\u001b[1;3m```text\n",
|
||||
"1040000 / (4/100)**3 / 1000000\n",
|
||||
"```\n",
|
||||
"...numexpr.evaluate(\"1040000 / (4/100)**3 / 1000000\")...\n",
|
||||
"\u001b[0m\n",
|
||||
"Answer: \u001b[33;1m\u001b[1;3m16249.999999999998\u001b[0m\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\u001b[38;5;200m\u001b[1;3mAnswer: 16249.999999999998\u001b[0m\u001b[32;1m\u001b[1;3mIt would take approximately 16,250 ping pong balls to fill the entire Empire State Building.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
@@ -129,13 +141,15 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This looks good! Let's try it out on another query."
|
||||
"This looks alright.. Let's try it out on another query."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
@@ -143,43 +157,49 @@
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m{\n",
|
||||
" \"action\": \"Calculator\",\n",
|
||||
" \"action_input\": \"The length of the Eiffel Tower is 324 meters. The distance from coast to coast in the US is approximately 4,828 kilometers. First, we need to convert 4,828 kilometers to meters, which gives us 4,828,000 meters. To find out how many Eiffel Towers we need, we can divide 4,828,000 by 324. This gives us approximately 14,876 Eiffel Towers.\"\n",
|
||||
"}\u001b[0m\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m\n",
|
||||
"Invoking: `Search` with `length of the US from coast to coast`\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3m\n",
|
||||
"== Watercraft ==\u001b[0m\u001b[32;1m\u001b[1;3m\n",
|
||||
"Invoking: `Search` with `distance from coast to coast of the US`\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3mThe Oregon Coast is a coastal region of the U.S. state of Oregon. It is bordered by the Pacific Ocean to its west and the Oregon Coast Range to the east, and stretches approximately 362 miles (583 km) from the California state border in the south to the Columbia River in the north. The region is not a specific geological, environmental, or political entity, and includes the Columbia River Estuary.\n",
|
||||
"The Oregon Beach Bill of 1967 allows free beach access to everyone. In return for a pedestrian easement and relief from construction, the bill eliminates property taxes on private beach land and allows its owners to retain certain beach land rights.Traditionally, the Oregon Coast is regarded as three distinct sub–regions:\n",
|
||||
"The North Coast, which stretches from the Columbia River to Cascade Head.\n",
|
||||
"The Central Coast, which stretches from Cascade Head to Reedsport.\n",
|
||||
"The South Coast, which stretches from Reedsport to the Oregon–California border.The largest city is Coos Bay, population 16,700 in Coos County on the South Coast. U.S. Route 101 is the primary highway from Brookings to Astoria and is known for its scenic overlooks of the Pacific Ocean. Over 80 state parks and recreation areas dot the Oregon Coast. However, only a few highways cross the Coast Range to the interior: US 30, US 26, OR 6, US 20, OR 18, OR 34, OR 126, OR 38, and OR 42. OR 18 and US 20 are considered among the dangerous roads in the state.The Oregon Coast includes Clatsop County, Tillamook County, Lincoln County, western Lane County, western Douglas County, Coos County, and Curry County.\u001b[0m\u001b[32;1m\u001b[1;3m\n",
|
||||
"Invoking: `Calculator` with `362 miles * 5280 feet`\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMMathChain chain...\u001b[0m\n",
|
||||
"The length of the Eiffel Tower is 324 meters. The distance from coast to coast in the US is approximately 4,828 kilometers. First, we need to convert 4,828 kilometers to meters, which gives us 4,828,000 meters. To find out how many Eiffel Towers we need, we can divide 4,828,000 by 324. This gives us approximately 14,876 Eiffel Towers.\u001b[32;1m\u001b[1;3m\n",
|
||||
"```text\n",
|
||||
"4828000 / 324\n",
|
||||
"```\n",
|
||||
"...numexpr.evaluate(\"4828000 / 324\")...\n",
|
||||
"\u001b[0m\n",
|
||||
"Answer: \u001b[33;1m\u001b[1;3m14901.234567901234\u001b[0m\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"Observation: \u001b[38;5;200m\u001b[1;3mAnswer: 14901.234567901234\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3m{\n",
|
||||
" \"action\": \"Calculator\",\n",
|
||||
" \"action_input\": \"The length of the Eiffel Tower is 324 meters. The distance from coast to coast in the US is approximately 4,828 kilometers. First, we need to convert 4,828 kilometers to meters, which gives us 4,828,000 meters. To find out how many Eiffel Towers we need, we can divide 4,828,000 by 324. This gives us approximately 14,901 Eiffel Towers.\"\n",
|
||||
"}\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMMathChain chain...\u001b[0m\n",
|
||||
"The length of the Eiffel Tower is 324 meters. The distance from coast to coast in the US is approximately 4,828 kilometers. First, we need to convert 4,828 kilometers to meters, which gives us 4,828,000 meters. To find out how many Eiffel Towers we need, we can divide 4,828,000 by 324. This gives us approximately 14,901 Eiffel Towers.\u001b[32;1m\u001b[1;3m\n",
|
||||
"```text\n",
|
||||
"4828000 / 324\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"362 miles * 5280 feet\u001b[32;1m\u001b[1;3m```text\n",
|
||||
"362 * 5280\n",
|
||||
"```\n",
|
||||
"...numexpr.evaluate(\"4828000 / 324\")...\n",
|
||||
"...numexpr.evaluate(\"362 * 5280\")...\n",
|
||||
"\u001b[0m\n",
|
||||
"Answer: \u001b[33;1m\u001b[1;3m14901.234567901234\u001b[0m\n",
|
||||
"Answer: \u001b[33;1m\u001b[1;3m1911360\u001b[0m\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\u001b[38;5;200m\u001b[1;3mAnswer: 1911360\u001b[0m\u001b[32;1m\u001b[1;3m\n",
|
||||
"Invoking: `Calculator` with `1911360 feet / 1063 feet`\n",
|
||||
"\n",
|
||||
"Observation: \u001b[38;5;200m\u001b[1;3mAnswer: 14901.234567901234\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3m{\n",
|
||||
" \"action\": \"Final Answer\",\n",
|
||||
" \"action_input\": \"If you laid the Eiffel Tower end to end, you would need approximately 14,901 Eiffel Towers to cover the US from coast to coast.\"\n",
|
||||
"}\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"1911360 feet / 1063 feet\u001b[32;1m\u001b[1;3m```text\n",
|
||||
"1911360 / 1063\n",
|
||||
"```\n",
|
||||
"...numexpr.evaluate(\"1911360 / 1063\")...\n",
|
||||
"\u001b[0m\n",
|
||||
"Answer: \u001b[33;1m\u001b[1;3m1798.0809031044214\u001b[0m\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\u001b[38;5;200m\u001b[1;3mAnswer: 1798.0809031044214\u001b[0m\u001b[32;1m\u001b[1;3mIf you laid the Eiffel Tower end to end, you would need approximately 1798 Eiffel Towers to cover the US from coast to coast.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
@@ -205,16 +225,17 @@
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.evaluation.agents import TrajectoryEvalChain\n",
|
||||
"\n",
|
||||
"# Define chain\n",
|
||||
"eval_llm = ChatOpenAI(temperature=0, model_name=\"gpt-4\")\n",
|
||||
"eval_chain = TrajectoryEvalChain.from_llm(\n",
|
||||
" llm=ChatOpenAI(\n",
|
||||
" temperature=0, model_name=\"gpt-4\"\n",
|
||||
" ), # Note: This must be a ChatOpenAI model\n",
|
||||
" llm=eval_llm, # Note: This must be a chat model\n",
|
||||
" agent_tools=agent.tools,\n",
|
||||
" return_reasoning=True,\n",
|
||||
")"
|
||||
@@ -237,17 +258,22 @@
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Score from 1 to 5: 1\n",
|
||||
"Reasoning: First, let's evaluate the final answer. The final answer is incorrect because it uses the volume of golf balls instead of ping pong balls. The answer is not helpful.\n",
|
||||
"Reasoning: i. Is the final answer helpful?\n",
|
||||
"The final answer is not helpful because it is incorrect. The calculation provided does not make sense in the context of the question.\n",
|
||||
"\n",
|
||||
"Second, does the model use a logical sequence of tools to answer the question? The model only used one tool, which was the Search the Web (SerpAPI). It did not use the Calculator tool to calculate the correct volume of ping pong balls.\n",
|
||||
"ii. Does the AI language use a logical sequence of tools to answer the question?\n",
|
||||
"The AI language model does not use a logical sequence of tools. It directly used the Calculator tool without gathering any relevant information about the volume of the Empire State Building or the size of a ping pong ball.\n",
|
||||
"\n",
|
||||
"Third, does the AI language model use the tools in a helpful way? The model used the Search the Web (SerpAPI) tool, but the output was not helpful because it provided information about golf balls instead of ping pong balls.\n",
|
||||
"iii. Does the AI language model use the tools in a helpful way?\n",
|
||||
"The AI language model does not use the tools in a helpful way. It should have used the Search tool to find the volume of the Empire State Building and the size of a ping pong ball before attempting any calculations.\n",
|
||||
"\n",
|
||||
"Fourth, does the AI language model use too many steps to answer the question? The model used only one step, which is not too many. However, it should have used more steps to provide a correct answer.\n",
|
||||
"iv. Does the AI language model use too many steps to answer the question?\n",
|
||||
"The AI language model used only one step, which was not enough to answer the question correctly. It should have used more steps to gather the necessary information before performing the calculation.\n",
|
||||
"\n",
|
||||
"Fifth, are the appropriate tools used to answer the question? The model should have used the Search tool to find the volume of the Empire State Building and the volume of a ping pong ball. Then, it should have used the Calculator tool to calculate the number of ping pong balls needed to fill the building.\n",
|
||||
"v. Are the appropriate tools used to answer the question?\n",
|
||||
"The appropriate tools were not used to answer the question. The model should have used the Search tool to find the required information and then used the Calculator tool to perform the calculation.\n",
|
||||
"\n",
|
||||
"Judgment: Given the incorrect final answer and the inappropriate use of tools, we give the model a score of 1.\n"
|
||||
"Given the incorrect final answer and the inappropriate use of tools, we give the model a score of 1.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -258,12 +284,10 @@
|
||||
" test_outputs_one[\"output\"],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"evaluation = eval_chain(\n",
|
||||
" inputs={\n",
|
||||
" \"question\": question,\n",
|
||||
" \"answer\": answer,\n",
|
||||
" \"agent_trajectory\": eval_chain.get_agent_trajectory(steps),\n",
|
||||
" },\n",
|
||||
"evaluation = eval_chain.evaluate_agent_trajectory(\n",
|
||||
" input=test_outputs_one[\"input\"],\n",
|
||||
" output=test_outputs_one[\"output\"],\n",
|
||||
" agent_trajectory=test_outputs_one[\"intermediate_steps\"],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(\"Score from 1 to 5: \", evaluation[\"score\"])\n",
|
||||
@@ -274,51 +298,97 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"That seems about right. Let's try the second query."
|
||||
"**That seems about right. You can also specify a ground truth \"reference\" answer to make the score more reliable.**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 13,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Score from 1 to 5: 1\n",
|
||||
"Reasoning: i. Is the final answer helpful?\n",
|
||||
"The final answer is not helpful, as it is incorrect. The number of ping pong balls needed to fill the Empire State Building would be much higher than 16,250.\n",
|
||||
"\n",
|
||||
"ii. Does the AI language use a logical sequence of tools to answer the question?\n",
|
||||
"The AI language model does not use a logical sequence of tools. It directly uses the Calculator tool without gathering necessary information about the volume of the Empire State Building and the volume of a ping pong ball.\n",
|
||||
"\n",
|
||||
"iii. Does the AI language model use the tools in a helpful way?\n",
|
||||
"The AI language model does not use the tools in a helpful way. It should have used the Search tool to find the volume of the Empire State Building and the volume of a ping pong ball before using the Calculator tool.\n",
|
||||
"\n",
|
||||
"iv. Does the AI language model use too many steps to answer the question?\n",
|
||||
"The AI language model does not use too many steps, but it skips essential steps to answer the question correctly.\n",
|
||||
"\n",
|
||||
"v. Are the appropriate tools used to answer the question?\n",
|
||||
"The appropriate tools are not used to answer the question. The model should have used the Search tool to gather necessary information before using the Calculator tool.\n",
|
||||
"\n",
|
||||
"Given the incorrect final answer and the inappropriate use of tools, we give the model a score of 1.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"evaluation = eval_chain.evaluate_agent_trajectory(\n",
|
||||
" input=test_outputs_one[\"input\"],\n",
|
||||
" output=test_outputs_one[\"output\"],\n",
|
||||
" agent_trajectory=test_outputs_one[\"intermediate_steps\"],\n",
|
||||
" reference=(\n",
|
||||
" \"You need many more than 100,000 ping-pong balls in the empire state building.\"\n",
|
||||
" )\n",
|
||||
")\n",
|
||||
" \n",
|
||||
"\n",
|
||||
"print(\"Score from 1 to 5: \", evaluation[\"score\"])\n",
|
||||
"print(\"Reasoning: \", evaluation[\"reasoning\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Let's try the second query. This time, use the async API. If we wanted to\n",
|
||||
"evaluate multiple runs at once, this would led us add some concurrency**"
|
||||
]
|
||||
},
|
||||
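A minimal sketch of that concurrency (assuming the captured runs are collected in a list; `aevaluate_agent_trajectory` is the async method used below):

```python
import asyncio

# Hypothetical list of captured agent runs to grade in parallel.
test_outputs = [test_outputs_one, test_outputs_two]

async def evaluate_all(outputs):
    # Fan the async evaluations out and await them together.
    return await asyncio.gather(*(
        eval_chain.aevaluate_agent_trajectory(
            input=o["input"],
            output=o["output"],
            agent_trajectory=o["intermediate_steps"],
        )
        for o in outputs
    ))

# In a notebook you can simply `await evaluate_all(test_outputs)`.
evaluations = asyncio.run(evaluate_all(test_outputs))
```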
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Score from 1 to 5: 3\n",
|
||||
"Score from 1 to 5: 2\n",
|
||||
"Reasoning: i. Is the final answer helpful?\n",
|
||||
"Yes, the final answer is helpful as it provides an approximate number of Eiffel Towers needed to cover the US from coast to coast.\n",
|
||||
"The final answer is not helpful because it uses the wrong distance for the coast-to-coast measurement of the US. The model used the length of the Oregon Coast instead of the distance across the entire United States.\n",
|
||||
"\n",
|
||||
"ii. Does the AI language use a logical sequence of tools to answer the question?\n",
|
||||
"No, the AI language model does not use a logical sequence of tools. It directly uses the Calculator tool without first using the Search or Lookup tools to find the necessary information (length of the Eiffel Tower and distance from coast to coast in the US).\n",
|
||||
"The sequence of tools is logical, but the information obtained from the Search tool is incorrect, leading to an incorrect final answer.\n",
|
||||
"\n",
|
||||
"iii. Does the AI language model use the tools in a helpful way?\n",
|
||||
"The AI language model uses the Calculator tool in a helpful way to perform the calculation, but it should have used the Search or Lookup tools first to find the required information.\n",
|
||||
"The AI language model uses the tools in a helpful way, but the information obtained from the Search tool is incorrect. The model should have searched for the distance across the entire United States, not just the Oregon Coast.\n",
|
||||
"\n",
|
||||
"iv. Does the AI language model use too many steps to answer the question?\n",
|
||||
"No, the AI language model does not use too many steps. However, it repeats the same step twice, which is unnecessary.\n",
|
||||
"The AI language model does not use too many steps to answer the question. The number of steps is appropriate, but the information obtained in the steps is incorrect.\n",
|
||||
"\n",
|
||||
"v. Are the appropriate tools used to answer the question?\n",
|
||||
"Not entirely. The AI language model should have used the Search or Lookup tools to find the required information before using the Calculator tool.\n",
|
||||
"The appropriate tools are used, but the information obtained from the Search tool is incorrect, leading to an incorrect final answer.\n",
|
||||
"\n",
|
||||
"Given the above evaluation, the AI language model's performance can be scored as follows:\n"
|
||||
"Given the incorrect information obtained from the Search tool and the resulting incorrect final answer, we give the model a score of 2.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"question, steps, answer = (\n",
|
||||
" test_outputs_two[\"input\"],\n",
|
||||
" test_outputs_two[\"intermediate_steps\"],\n",
|
||||
" test_outputs_two[\"output\"],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"evaluation = eval_chain(\n",
|
||||
" inputs={\n",
|
||||
" \"question\": question,\n",
|
||||
" \"answer\": answer,\n",
|
||||
" \"agent_trajectory\": eval_chain.get_agent_trajectory(steps),\n",
|
||||
" },\n",
|
||||
"evaluation = await eval_chain.aevaluate_agent_trajectory(\n",
|
||||
" input=test_outputs_two[\"input\"],\n",
|
||||
" output=test_outputs_two[\"output\"],\n",
|
||||
" agent_trajectory=test_outputs_two[\"intermediate_steps\"],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(\"Score from 1 to 5: \", evaluation[\"score\"])\n",
|
||||
@@ -329,7 +399,11 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"That also sounds about right. In conclusion, the TrajectoryEvalChain allows us to use GPT-4 to score both our agent's outputs and tool use in addition to giving us the reasoning behind the evaluation."
|
||||
"## Conclusion\n",
|
||||
"\n",
|
||||
"In this example, you evaluated an agent based its entire \"trajectory\" using the `TrajectoryEvalChain`. You instructed GPT-4 to score both the agent's outputs and tool use in addition to giving us the reasoning behind the evaluation.\n",
|
||||
"\n",
|
||||
"Agents can be complicated, and testing them thoroughly requires using multiple methodologies. Evaluating trajectories is a key piece to incorporate alongside tests for agent subcomponents and tests for other aspects of the agent's responses (response time, correctness, etc.) "
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -349,7 +423,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.11.3"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
@@ -358,5 +432,5 @@
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
"source": [
|
||||
"# Evaluating an OpenAPI Chain\n",
|
||||
"\n",
|
||||
"This notebook goes over ways to semantically evaluate an [OpenAPI Chain](/docs/modules/chains/additiona/openapi.html), which calls an endpoint defined by the OpenAPI specification using purely natural language."
|
||||
"This notebook goes over ways to semantically evaluate an [OpenAPI Chain](/docs/modules/chains/additional/openapi.html), which calls an endpoint defined by the OpenAPI specification using purely natural language."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -0,0 +1,386 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "g9EmNu5DD9YI"
|
||||
},
|
||||
"source": [
|
||||
"# Custom functions with OpenAI Functions Agent\n",
|
||||
"\n",
|
||||
"This notebook goes through how to integrate custom functions with OpenAI Functions agent."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "LFKylC3CPtTl"
|
||||
},
|
||||
"source": [
|
||||
"Install libraries which are required to run this example notebook\n",
|
||||
"\n",
|
||||
"`pip install -q openai langchain yfinance`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "E2DqzmEGDPak"
|
||||
},
|
||||
"source": [
|
||||
"## Define custom functions"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"id": "SiucthMs6SIK"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import yfinance as yf\n",
|
||||
"from datetime import datetime, timedelta\n",
|
||||
"\n",
|
||||
"def get_current_stock_price(ticker):\n",
|
||||
" \"\"\"Method to get current stock price\"\"\"\n",
|
||||
"\n",
|
||||
" ticker_data = yf.Ticker(ticker)\n",
|
||||
" recent = ticker_data.history(period='1d')\n",
|
||||
" return {\n",
|
||||
" 'price': recent.iloc[0]['Close'],\n",
|
||||
" 'currency': ticker_data.info['currency']\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
"def get_stock_performance(ticker, days):\n",
|
||||
" \"\"\"Method to get stock price change in percentage\"\"\"\n",
|
||||
"\n",
|
||||
" past_date = datetime.today() - timedelta(days=days)\n",
|
||||
" ticker_data = yf.Ticker(ticker)\n",
|
||||
" history = ticker_data.history(start=past_date)\n",
|
||||
" old_price = history.iloc[0]['Close']\n",
|
||||
" current_price = history.iloc[-1]['Close']\n",
|
||||
" return {\n",
|
||||
" 'percent_change': ((current_price - old_price)/old_price)*100\n",
|
||||
" }"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"id": "vRLINGvQR1rO",
|
||||
"outputId": "68230a4b-dda2-4273-b956-7439661e3785"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'price': 334.57000732421875, 'currency': 'USD'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"get_current_stock_price('MSFT')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"id": "57T190q235mD",
|
||||
"outputId": "c6ee66ec-0659-4632-85d1-263b08826e68"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'percent_change': 1.014466941163018}"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"get_stock_performance('MSFT', 30)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "MT8QsdyBDhwg"
|
||||
},
|
||||
"source": [
|
||||
"## Make custom tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {
|
||||
"id": "NvLOUv-XP3Ap"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import Type\n",
|
||||
"from pydantic import BaseModel, Field\n",
|
||||
"from langchain.tools import BaseTool\n",
|
||||
"\n",
|
||||
"class CurrentStockPriceInput(BaseModel):\n",
|
||||
" \"\"\"Inputs for get_current_stock_price\"\"\"\n",
|
||||
" ticker: str = Field(description=\"Ticker symbol of the stock\")\n",
|
||||
"\n",
|
||||
"class CurrentStockPriceTool(BaseTool):\n",
|
||||
" name = \"get_current_stock_price\"\n",
|
||||
" description = \"\"\"\n",
|
||||
" Useful when you want to get current stock price.\n",
|
||||
" You should enter the stock ticker symbol recognized by the yahoo finance\n",
|
||||
" \"\"\"\n",
|
||||
" args_schema: Type[BaseModel] = CurrentStockPriceInput\n",
|
||||
"\n",
|
||||
" def _run(self, ticker: str):\n",
|
||||
" price_response = get_current_stock_price(ticker)\n",
|
||||
" return price_response\n",
|
||||
"\n",
|
||||
" def _arun(self, ticker: str):\n",
|
||||
" raise NotImplementedError(\"get_current_stock_price does not support async\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class StockPercentChangeInput(BaseModel):\n",
|
||||
" \"\"\"Inputs for get_stock_performance\"\"\"\n",
|
||||
" ticker: str = Field(description=\"Ticker symbol of the stock\")\n",
|
||||
" days: int = Field(description='Timedelta days to get past date from current date')\n",
|
||||
"\n",
|
||||
"class StockPerformanceTool(BaseTool):\n",
|
||||
" name = \"get_stock_performance\"\n",
|
||||
" description = \"\"\"\n",
|
||||
" Useful when you want to check performance of the stock.\n",
|
||||
" You should enter the stock ticker symbol recognized by the yahoo finance.\n",
|
||||
" You should enter days as number of days from today from which performance needs to be check.\n",
|
||||
" output will be the change in the stock price represented as a percentage.\n",
|
||||
" \"\"\"\n",
|
||||
" args_schema: Type[BaseModel] = StockPercentChangeInput\n",
|
||||
"\n",
|
||||
" def _run(self, ticker: str, days: int):\n",
|
||||
" response = get_stock_performance(ticker, days)\n",
|
||||
" return response\n",
|
||||
"\n",
|
||||
" def _arun(self, ticker: str):\n",
|
||||
" raise NotImplementedError(\"get_stock_performance does not support async\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "PVKoqeCyFKHF"
|
||||
},
|
||||
"source": [
|
||||
"## Create Agent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {
|
||||
"id": "yY7qNB7vSQGh"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import AgentType\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.agents import initialize_agent\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(\n",
|
||||
" model=\"gpt-3.5-turbo-0613\",\n",
|
||||
" temperature=0\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"tools = [\n",
|
||||
" CurrentStockPriceTool(),\n",
|
||||
" StockPerformanceTool()\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"agent = initialize_agent(tools, llm, agent=AgentType.OPENAI_FUNCTIONS, verbose=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/",
|
||||
"height": 321
|
||||
},
|
||||
"id": "4X96xmgwRkcC",
|
||||
"outputId": "a91b13ef-9643-4f60-d067-c4341e0b285e"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m\n",
|
||||
"Invoking: `get_current_stock_price` with `{'ticker': 'MSFT'}`\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3m{'price': 334.57000732421875, 'currency': 'USD'}\u001b[0m\u001b[32;1m\u001b[1;3m\n",
|
||||
"Invoking: `get_stock_performance` with `{'ticker': 'MSFT', 'days': 180}`\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[33;1m\u001b[1;3m{'percent_change': 40.163963297187905}\u001b[0m\u001b[32;1m\u001b[1;3mThe current price of Microsoft stock is $334.57 USD. \n",
|
||||
"\n",
|
||||
"Over the past 6 months, Microsoft stock has performed well with a 40.16% increase in its price.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'The current price of Microsoft stock is $334.57 USD. \\n\\nOver the past 6 months, Microsoft stock has performed well with a 40.16% increase in its price.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.run(\"What is the current price of Microsoft stock? How it has performed over past 6 months?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/",
|
||||
"height": 285
|
||||
},
|
||||
"id": "nkZ_vmAcT7Al",
|
||||
"outputId": "092ebc55-4d28-4a4b-aa2a-98ae47ceec20"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m\n",
|
||||
"Invoking: `get_current_stock_price` with `{'ticker': 'GOOGL'}`\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3m{'price': 118.33000183105469, 'currency': 'USD'}\u001b[0m\u001b[32;1m\u001b[1;3m\n",
|
||||
"Invoking: `get_current_stock_price` with `{'ticker': 'META'}`\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3m{'price': 287.04998779296875, 'currency': 'USD'}\u001b[0m\u001b[32;1m\u001b[1;3mThe recent stock price of Google (GOOGL) is $118.33 USD and the recent stock price of Meta (META) is $287.05 USD.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'The recent stock price of Google (GOOGL) is $118.33 USD and the recent stock price of Meta (META) is $287.05 USD.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.run(\"Give me recent stock prices of Google and Meta?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/",
|
||||
"height": 466
|
||||
},
|
||||
"id": "jLU-HjMq7n1o",
|
||||
"outputId": "a42194dd-26ed-4b5a-d4a2-1038420045c4"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m\n",
|
||||
"Invoking: `get_stock_performance` with `{'ticker': 'MSFT', 'days': 90}`\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[33;1m\u001b[1;3m{'percent_change': 18.043096235165596}\u001b[0m\u001b[32;1m\u001b[1;3m\n",
|
||||
"Invoking: `get_stock_performance` with `{'ticker': 'GOOGL', 'days': 90}`\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[33;1m\u001b[1;3m{'percent_change': 17.286155760642853}\u001b[0m\u001b[32;1m\u001b[1;3mIn the past 3 months, Microsoft (MSFT) has performed better than Google (GOOGL). Microsoft's stock price has increased by 18.04% while Google's stock price has increased by 17.29%.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"In the past 3 months, Microsoft (MSFT) has performed better than Google (GOOGL). Microsoft's stock price has increased by 18.04% while Google's stock price has increased by 17.29%.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.run('In the past 3 months, which stock between Microsoft and Google has performed the best?')"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": []
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.16"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 1
|
||||
}
|
||||
@@ -7,7 +7,7 @@
|
||||
"source": [
|
||||
"# Zapier Natural Language Actions API\n",
|
||||
"\\\n",
|
||||
"Full docs here: https://nla.zapier.com/api/v1/docs\n",
|
||||
"Full docs here: https://nla.zapier.com/start/\n",
|
||||
"\n",
|
||||
"**Zapier Natural Language Actions** gives you access to the 5k+ apps, 20k+ actions on Zapier's platform through a natural language API interface.\n",
|
||||
"\n",
|
||||
@@ -21,7 +21,7 @@
|
||||
"\n",
|
||||
"2. User-facing (Oauth): for production scenarios where you are deploying an end-user facing application and LangChain needs access to end-user's exposed actions and connected accounts on Zapier.com\n",
|
||||
"\n",
|
||||
"This quick start will focus on the server-side use case for brevity. Review [full docs](https://nla.zapier.com/api/v1/docs) or reach out to nla@zapier.com for user-facing oauth developer support.\n",
|
||||
"This quick start will focus mostly on the server-side use case for brevity. Jump to [Example Using OAuth Access Token](#oauth) to see a short example how to set up Zapier for user-facing situations. Review [full docs](https://nla.zapier.com/start/) for full user-facing oauth developer support.\n",
|
||||
"\n",
|
||||
"This example goes over how to use the Zapier integration with a `SimpleSequentialChain`, then an `Agent`.\n",
|
||||
"In code, below:"
|
||||
@@ -39,7 +39,7 @@
|
||||
"# get from https://platform.openai.com/\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = os.environ.get(\"OPENAI_API_KEY\", \"\")\n",
|
||||
"\n",
|
||||
"# get from https://nla.zapier.com/demo/provider/debug (under User Information, after logging in):\n",
|
||||
"# get from https://nla.zapier.com/docs/authentication/ after logging in):\n",
|
||||
"os.environ[\"ZAPIER_NLA_API_KEY\"] = os.environ.get(\"ZAPIER_NLA_API_KEY\", \"\")"
|
||||
]
|
||||
},
|
||||
@@ -149,7 +149,7 @@
|
||||
"id": "bcdea831",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Example with SimpleSequentialChain\n",
|
||||
"## Example with SimpleSequentialChain\n",
|
||||
"If you need more explicit control, use a chain, like below."
|
||||
]
|
||||
},
|
||||
@@ -323,12 +323,34 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"cell_type": "markdown",
|
||||
"id": "09ff954e-45f2-4595-92ea-91627abde4a0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## <a id=\"oauth\">Example Using OAuth Access Token</a>\n",
|
||||
"The below snippet shows how to initialize the wrapper with a procured OAuth access token. Note the argument being passed in as opposed to setting an environment variable. Review the [authentication docs](https://nla.zapier.com/docs/authentication/#oauth-credentials) for full user-facing oauth developer support.\n",
|
||||
"\n",
|
||||
"The developer is tasked with handling the OAuth handshaking to procure and refresh the access token."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7c6835c8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
"source": [
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"zapier = ZapierNLAWrapper(zapier_nla_oauth_access_token='<fill in access token here>')\n",
|
||||
"toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier)\n",
|
||||
"agent = initialize_agent(\n",
|
||||
" toolkit.get_tools(), llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"agent.run(\n",
|
||||
" \"Summarize the last email I received regarding Silicon Valley Bank. Send the summary to the #test-zapier channel in slack.\"\n",
|
||||
")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
73
docs/extras/modules/callbacks/integrations/streamlit.md
Normal file
@@ -0,0 +1,73 @@
|
||||
# Streamlit
|
||||
|
||||
> **[Streamlit](https://streamlit.io/) is a faster way to build and share data apps.**
|
||||
> Streamlit turns data scripts into shareable web apps in minutes. All in pure Python. No front‑end experience required.
|
||||
> See more examples at [streamlit.io/generative-ai](https://streamlit.io/generative-ai).
|
||||
|
||||
[](https://codespaces.new/langchain-ai/streamlit-agent?quickstart=1)
|
||||
|
||||
In this guide we will demonstrate how to use `StreamlitCallbackHandler` to display the thoughts and actions of an agent in an
|
||||
interactive Streamlit app. Try it out with the running app below using the [MRKL agent](/docs/modules/agents/how_to/mrkl/):
|
||||
|
||||
<iframe loading="lazy" src="https://mrkl-minimal.streamlit.app/?embed=true&embed_options=light_theme"
|
||||
style={{ width: 100 + '%', border: 'none', marginBottom: 1 + 'rem', height: 600 }}
|
||||
allow="camera;clipboard-read;clipboard-write;"
|
||||
></iframe>
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
```bash
|
||||
pip install langchain streamlit
|
||||
```
|
||||
|
||||
You can run `streamlit hello` to load a sample app and validate your install succeeded. See full instructions in Streamlit's
|
||||
[Getting started documentation](https://docs.streamlit.io/library/get-started).
|
||||
|
||||
## Display thoughts and actions
|
||||
|
||||
To create a `StreamlitCallbackHandler`, you just need to provide a parent container to render the output.
|
||||
|
||||
```python
|
||||
from langchain.callbacks import StreamlitCallbackHandler
|
||||
import streamlit as st
|
||||
|
||||
st_callback = StreamlitCallbackHandler(st.container())
|
||||
```
|
||||
|
||||
Additional keyword arguments to customize the display behavior are described in the
|
||||
[API reference](https://api.python.langchain.com/en/latest/modules/callbacks.html#langchain.callbacks.StreamlitCallbackHandler).
|
||||
|
||||
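For instance, a minimal sketch of a handler with customized display behavior, assuming the keyword arguments documented in that API reference:

```python
from langchain.callbacks import StreamlitCallbackHandler
import streamlit as st

st_callback = StreamlitCallbackHandler(
    st.container(),
    max_thought_containers=4,         # cap on visible thought expanders
    expand_new_thoughts=True,         # open each new thought as it streams
    collapse_completed_thoughts=True, # fold thoughts once they finish
)
```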
### Scenario 1: Using an Agent with Tools
|
||||
|
||||
The primary supported use case today is visualizing the actions of an Agent with Tools (or Agent Executor). You can create an
|
||||
agent in your Streamlit app and simply pass the `StreamlitCallbackHandler` to `agent.run()` in order to visualize the
|
||||
thoughts and actions live in your app.
|
||||
|
||||
```python
|
||||
from langchain.llms import OpenAI
|
||||
from langchain.agents import AgentType, initialize_agent, load_tools
|
||||
from langchain.callbacks import StreamlitCallbackHandler
|
||||
import streamlit as st
|
||||
|
||||
llm = OpenAI(temperature=0, streaming=True)
|
||||
tools = load_tools(["ddg-search"])
|
||||
agent = initialize_agent(
|
||||
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
|
||||
)
|
||||
|
||||
if prompt := st.chat_input():
|
||||
st.chat_message("user").write(prompt)
|
||||
with st.chat_message("assistant"):
|
||||
st_callback = StreamlitCallbackHandler(st.container())
|
||||
response = agent.run(prompt, callbacks=[st_callback])
|
||||
st.write(response)
|
||||
```
|
||||
|
||||
**Note:** You will need to set `OPENAI_API_KEY` for the above app code to run successfully.
|
||||
The easiest way to do this is via [Streamlit secrets.toml](https://docs.streamlit.io/library/advanced-features/secrets-management),
|
||||
or any other local ENV management tool.
|
||||
|
||||
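A minimal sketch of reading the key from Streamlit secrets (assuming a `.streamlit/secrets.toml` entry named `OPENAI_API_KEY`):

```python
import os
import streamlit as st

# Mirror the key from Streamlit secrets into the environment so the
# OpenAI wrapper can pick it up.
os.environ["OPENAI_API_KEY"] = st.secrets["OPENAI_API_KEY"]
```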
### Additional scenarios
|
||||
|
||||
Currently `StreamlitCallbackHandler` is geared towards use with a LangChain Agent Executor. Support for additional agent types,
|
||||
use directly with Chains, etc. will be added in the future.
|
||||
@@ -71,11 +71,13 @@
|
||||
"import numpy as np\n",
|
||||
"\n",
|
||||
"from langchain.schema import BaseRetriever\n",
|
||||
"from langchain.callbacks.manager import AsyncCallbackManagerForRetrieverRun, CallbackManagerForRetrieverRun\n",
|
||||
"from langchain.utilities import GoogleSerperAPIWrapper\n",
|
||||
"from langchain.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.schema import Document"
|
||||
"from langchain.schema import Document\n",
|
||||
"from typing import Any"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -97,11 +99,16 @@
|
||||
" def __init__(self, search):\n",
|
||||
" self.search = search\n",
|
||||
"\n",
|
||||
" def get_relevant_documents(self, query: str):\n",
|
||||
" def _get_relevant_documents(self, query: str, *, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any) -> List[Document]:\n",
|
||||
" return [Document(page_content=self.search.run(query))]\n",
|
||||
"\n",
|
||||
" async def aget_relevant_documents(self, query: str):\n",
|
||||
" raise NotImplemented\n",
|
||||
" async def _aget_relevant_documents(self,\n",
|
||||
" query: str,\n",
|
||||
" *,\n",
|
||||
" run_manager: AsyncCallbackManagerForRetrieverRun,\n",
|
||||
" **kwargs: Any,\n",
|
||||
" ) -> List[Document]:\n",
|
||||
" raise NotImplementedError()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"retriever = SerperSearchRetriever(GoogleSerperAPIWrapper())"
|
||||
|
||||
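A short usage sketch for the retriever defined above; the public `get_relevant_documents()` wraps `_get_relevant_documents()` with callback handling, so callers never invoke the underscore method directly (the query string here is just an example):

```python
docs = retriever.get_relevant_documents("What is LangChain?")
print(docs[0].page_content[:200])
```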
@@ -32,7 +32,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 1,
|
||||
"id": "40cd9806",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -44,7 +44,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 2,
|
||||
"id": "2d20b852",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -56,7 +56,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 3,
|
||||
"id": "579fa702",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -68,7 +68,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 4,
|
||||
"id": "90c1d899",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -80,7 +80,7 @@
|
||||
"[Document(page_content='This is a test email to use for unit tests.\\n\\nImportant points:\\n\\nRoses are red\\n\\nViolets are blue', metadata={'source': 'example_data/fake-email.eml'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -128,7 +128,7 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Document(page_content='This is a test email to use for unit tests.', lookup_str='', metadata={'source': 'example_data/fake-email.eml'}, lookup_index=0)"
|
||||
"Document(page_content='This is a test email to use for unit tests.', metadata={'source': 'example_data/fake-email.eml', 'filename': 'fake-email.eml', 'file_directory': 'example_data', 'date': '2022-12-16T17:04:16-05:00', 'filetype': 'message/rfc822', 'sent_from': ['Matthew Robinson <mrobinson@unstructured.io>'], 'sent_to': ['Matthew Robinson <mrobinson@unstructured.io>'], 'subject': 'Test Email', 'category': 'NarrativeText'})"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
@@ -140,6 +140,61 @@
|
||||
"data[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5021f20a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Processing Attachments\n",
|
||||
"\n",
|
||||
"You can process attachments with `UnstructuredEmailLoader` by setting `process_attachments=True` in the constructor. By default, attachments will be partitioned using the `partition` function from `unstructured`. You can use a different partitioning function by passing the function to the `attachment_partitioner` kwarg."
|
||||
]
|
||||
},
|
||||
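A minimal sketch of swapping in a different partitioner, assuming `partition_text` from `unstructured` is an acceptable choice for plain-text attachments:

```python
from unstructured.partition.text import partition_text
from langchain.document_loaders import UnstructuredEmailLoader

loader = UnstructuredEmailLoader(
    "example_data/fake-email.eml",
    mode="elements",
    process_attachments=True,
    attachment_partitioner=partition_text,  # any compatible callable works
)
docs = loader.load()
```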
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "6539f166",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = UnstructuredEmailLoader(\n",
|
||||
" \"example_data/fake-email.eml\",\n",
|
||||
" mode=\"elements\",\n",
|
||||
" process_attachments=True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "aebead38",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"data = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "ddeb60f4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Document(page_content='This is a test email to use for unit tests.', metadata={'source': 'example_data/fake-email.eml', 'filename': 'fake-email.eml', 'file_directory': 'example_data', 'date': '2022-12-16T17:04:16-05:00', 'filetype': 'message/rfc822', 'sent_from': ['Matthew Robinson <mrobinson@unstructured.io>'], 'sent_to': ['Matthew Robinson <mrobinson@unstructured.io>'], 'subject': 'Test Email', 'category': 'NarrativeText'})"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"data[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6a074515",
|
||||
@@ -234,7 +289,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
"version": "3.8.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -0,0 +1,27 @@
|
||||
* Example Docs
|
||||
|
||||
The sample docs directory contains the following files:
|
||||
|
||||
- ~example-10k.html~ - A 10-K SEC filing in HTML format
|
||||
- ~layout-parser-paper.pdf~ - A PDF copy of the layout parser paper
|
||||
- ~factbook.xml~ / ~factbook.xsl~ - Example XML/XSL files that you
|
||||
can use to test stylesheets
|
||||
|
||||
These documents can be used to test out the parsers in the library. In
|
||||
addition, here are instructions for pulling in some sample docs that are
|
||||
too big to store in the repo.
|
||||
|
||||
** XBRL 10-K
|
||||
|
||||
You can get an example 10-K in inline XBRL format using the following
|
||||
~curl~. Note, you need to have the user agent set in the header or the
|
||||
SEC site will reject your request.
|
||||
|
||||
#+BEGIN_SRC bash
|
||||
|
||||
curl -O \
|
||||
-A '${organization} ${email}' \
|
||||
https://www.sec.gov/Archives/edgar/data/311094/000117184321001344/0001171843-21-001344.txt
|
||||
#+END_SRC
|
||||
|
||||
You can parse this document using the HTML parser.
|
||||
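A minimal sketch, assuming the filing was saved locally under the name from
the ~curl~ above:

#+BEGIN_SRC python
from unstructured.partition.html import partition_html

# Parse the inline-XBRL 10-K with the HTML partitioner.
elements = partition_html(filename="0001171843-21-001344.txt")
#+END_SRC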
@@ -0,0 +1,50 @@
|
||||
MIME-Version: 1.0
|
||||
Date: Fri, 23 Dec 2022 12:08:48 -0600
|
||||
Message-ID: <CAPgNNXSzLVJ-d1OCX_TjFgJU7ugtQrjFybPtAMmmYZzphxNFYg@mail.gmail.com>
|
||||
Subject: Fake email with attachment
|
||||
From: Mallori Harrell <mallori@unstructured.io>
|
||||
To: Mallori Harrell <mallori@unstructured.io>
|
||||
Content-Type: multipart/mixed; boundary="0000000000005d654405f082adb7"
|
||||
|
||||
--0000000000005d654405f082adb7
|
||||
Content-Type: multipart/alternative; boundary="0000000000005d654205f082adb5"
|
||||
|
||||
--0000000000005d654205f082adb5
|
||||
Content-Type: text/plain; charset="UTF-8"
|
||||
|
||||
Hello!
|
||||
|
||||
Here's the attachments!
|
||||
|
||||
It includes:
|
||||
|
||||
- Lots of whitespace
|
||||
- Little to no content
|
||||
- and is a quick read
|
||||
|
||||
Best,
|
||||
|
||||
Mallori
|
||||
|
||||
--0000000000005d654205f082adb5
|
||||
Content-Type: text/html; charset="UTF-8"
|
||||
Content-Transfer-Encoding: quoted-printable
|
||||
|
||||
<div dir=3D"ltr">Hello!=C2=A0<div><br></div><div>Here's the attachments=
|
||||
!</div><div><br></div><div>It includes:</div><div><ul><li style=3D"margin-l=
|
||||
eft:15px">Lots of whitespace</li><li style=3D"margin-left:15px">Little=C2=
|
||||
=A0to no content</li><li style=3D"margin-left:15px">and is a quick read</li=
|
||||
></ul><div>Best,</div></div><div><br></div><div>Mallori</div><div dir=3D"lt=
|
||||
r" class=3D"gmail_signature" data-smartmail=3D"gmail_signature"><div dir=3D=
|
||||
"ltr"><div><div><br></div></div></div></div></div>
|
||||
|
||||
--0000000000005d654205f082adb5--
|
||||
--0000000000005d654405f082adb7
|
||||
Content-Type: text/plain; charset="US-ASCII"; name="fake-attachment.txt"
|
||||
Content-Disposition: attachment; filename="fake-attachment.txt"
|
||||
Content-Transfer-Encoding: base64
|
||||
X-Attachment-Id: f_lc0tto5j0
|
||||
Content-ID: <f_lc0tto5j0>
|
||||
|
||||
SGV5IHRoaXMgaXMgYSBmYWtlIGF0dGFjaG1lbnQh
|
||||
--0000000000005d654405f082adb7--
|
||||
@@ -0,0 +1,17 @@
|
||||
class MyClass {
|
||||
constructor(name) {
|
||||
this.name = name;
|
||||
}
|
||||
|
||||
greet() {
|
||||
console.log(`Hello, ${this.name}!`);
|
||||
}
|
||||
}
|
||||
|
||||
function main() {
|
||||
const name = prompt("Enter your name:");
|
||||
const obj = new MyClass(name);
|
||||
obj.greet();
|
||||
}
|
||||
|
||||
main();
|
||||
@@ -0,0 +1,16 @@
|
||||
class MyClass:
|
||||
def __init__(self, name):
|
||||
self.name = name
|
||||
|
||||
def greet(self):
|
||||
print(f"Hello, {self.name}!")
|
||||
|
||||
|
||||
def main():
|
||||
name = input("Enter your name: ")
|
||||
obj = MyClass(name)
|
||||
obj.greet()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -0,0 +1,180 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "bdccb278",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Grobid\n",
|
||||
"\n",
|
||||
"GROBID is a machine learning library for extracting, parsing, and re-structuring raw documents.\n",
|
||||
"\n",
|
||||
"It is particularly good for sturctured PDFs, like academic papers.\n",
|
||||
"\n",
|
||||
"This loader uses GROBIB to parse PDFs into `Documents` that retain metadata associated with the section of text.\n",
|
||||
"\n",
|
||||
"---\n",
|
||||
"\n",
|
||||
"For users on `Mac` - \n",
|
||||
"\n",
|
||||
"(Note: additional instructions can be found [here](https://python.langchain.com/docs/ecosystem/integrations/grobid.mdx).)\n",
|
||||
"\n",
|
||||
"Install Java (Apple Silicon):\n",
|
||||
"```\n",
|
||||
"$ arch -arm64 brew install openjdk@11\n",
|
||||
"$ brew --prefix openjdk@11\n",
|
||||
"/opt/homebrew/opt/openjdk@ 11\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"In `~/.zshrc`:\n",
|
||||
"```\n",
|
||||
"export JAVA_HOME=/opt/homebrew/opt/openjdk@11\n",
|
||||
"export PATH=$JAVA_HOME/bin:$PATH\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Then, in Terminal:\n",
|
||||
"```\n",
|
||||
"$ source ~/.zshrc\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Confirm install:\n",
|
||||
"```\n",
|
||||
"$ which java\n",
|
||||
"/opt/homebrew/opt/openjdk@11/bin/java\n",
|
||||
"$ java -version \n",
|
||||
"openjdk version \"11.0.19\" 2023-04-18\n",
|
||||
"OpenJDK Runtime Environment Homebrew (build 11.0.19+0)\n",
|
||||
"OpenJDK 64-Bit Server VM Homebrew (build 11.0.19+0, mixed mode)\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Then, get [Grobid](https://grobid.readthedocs.io/en/latest/Install-Grobid/#getting-grobid):\n",
|
||||
"```\n",
|
||||
"$ curl -LO https://github.com/kermitt2/grobid/archive/0.7.3.zip\n",
|
||||
"$ unzip 0.7.3.zip\n",
|
||||
"```\n",
|
||||
" \n",
|
||||
"Build\n",
|
||||
"```\n",
|
||||
"$ ./gradlew clean install\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Then, run the server:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "2d8992fc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! get_ipython().system_raw('nohup ./gradlew run > grobid.log 2>&1 &')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4b41bfb1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now, we can use the data loader."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "640e9a4b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders.parsers import GrobidParser\n",
|
||||
"from langchain.document_loaders.generic import GenericLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "ecdc1fb9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = GenericLoader.from_filesystem(\n",
|
||||
" \"../Papers/\",\n",
|
||||
" glob=\"*\",\n",
|
||||
" suffixes=[\".pdf\"],\n",
|
||||
" parser= GrobidParser(segment_sentences=False)\n",
|
||||
")\n",
|
||||
"docs = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "efe9e356",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Unlike Chinchilla, PaLM, or GPT-3, we only use publicly available data, making our work compatible with open-sourcing, while most existing models rely on data which is either not publicly available or undocumented (e.g.\"Books -2TB\" or \"Social media conversations\").There exist some exceptions, notably OPT (Zhang et al., 2022), GPT-NeoX (Black et al., 2022), BLOOM (Scao et al., 2022) and GLM (Zeng et al., 2022), but none that are competitive with PaLM-62B or Chinchilla.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs[3].page_content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "5be03d17",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'text': 'Unlike Chinchilla, PaLM, or GPT-3, we only use publicly available data, making our work compatible with open-sourcing, while most existing models rely on data which is either not publicly available or undocumented (e.g.\"Books -2TB\" or \"Social media conversations\").There exist some exceptions, notably OPT (Zhang et al., 2022), GPT-NeoX (Black et al., 2022), BLOOM (Scao et al., 2022) and GLM (Zeng et al., 2022), but none that are competitive with PaLM-62B or Chinchilla.',\n",
|
||||
" 'para': '2',\n",
|
||||
" 'bboxes': \"[[{'page': '1', 'x': '317.05', 'y': '509.17', 'h': '207.73', 'w': '9.46'}, {'page': '1', 'x': '306.14', 'y': '522.72', 'h': '220.08', 'w': '9.46'}, {'page': '1', 'x': '306.14', 'y': '536.27', 'h': '218.27', 'w': '9.46'}, {'page': '1', 'x': '306.14', 'y': '549.82', 'h': '218.65', 'w': '9.46'}, {'page': '1', 'x': '306.14', 'y': '563.37', 'h': '136.98', 'w': '9.46'}], [{'page': '1', 'x': '446.49', 'y': '563.37', 'h': '78.11', 'w': '9.46'}, {'page': '1', 'x': '304.69', 'y': '576.92', 'h': '138.32', 'w': '9.46'}], [{'page': '1', 'x': '447.75', 'y': '576.92', 'h': '76.66', 'w': '9.46'}, {'page': '1', 'x': '306.14', 'y': '590.47', 'h': '219.63', 'w': '9.46'}, {'page': '1', 'x': '306.14', 'y': '604.02', 'h': '218.27', 'w': '9.46'}, {'page': '1', 'x': '306.14', 'y': '617.56', 'h': '218.27', 'w': '9.46'}, {'page': '1', 'x': '306.14', 'y': '631.11', 'h': '220.18', 'w': '9.46'}]]\",\n",
|
||||
" 'pages': \"('1', '1')\",\n",
|
||||
" 'section_title': 'Introduction',\n",
|
||||
" 'section_number': '1',\n",
|
||||
" 'paper_title': 'LLaMA: Open and Efficient Foundation Language Models',\n",
|
||||
" 'file_path': '/Users/31treehaus/Desktop/Papers/2302.13971.pdf'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs[3].metadata"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.16"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -0,0 +1,103 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "33205b12",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# LarkSuite (FeiShu)\n",
|
||||
"\n",
|
||||
">[LarkSuite](https://www.larksuite.com/) is an enterprise collaboration platform developed by ByteDance.\n",
|
||||
"\n",
|
||||
"This notebook covers how to load data from the `LarkSuite` REST API into a format that can be ingested into LangChain, along with example usage for text summarization.\n",
|
||||
"\n",
|
||||
"The LarkSuite API requires an access token (tenant_access_token or user_access_token), checkout [LarkSuite open platform document](https://open.larksuite.com/document) for API details."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "90b69c94",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-06-19T10:05:03.645161Z",
|
||||
"start_time": "2023-06-19T10:04:49.541968Z"
|
||||
},
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from getpass import getpass\n",
|
||||
"from langchain.document_loaders.larksuite import LarkSuiteDocLoader\n",
|
||||
"\n",
|
||||
"DOMAIN = input(\"larksuite domain\")\n",
|
||||
"ACCESS_TOKEN = getpass(\"larksuite tenant_access_token or user_access_token\")\n",
|
||||
"DOCUMENT_ID = input(\"larksuite document id\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "13deb0f5",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-06-19T10:05:36.016495Z",
|
||||
"start_time": "2023-06-19T10:05:35.360884Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[Document(page_content='Test Doc\\nThis is a Test Doc\\n\\n1\\n2\\n3\\n\\n', metadata={'document_id': 'V76kdbd2HoBbYJxdiNNccajunPf', 'revision_id': 11, 'title': 'Test Doc'})]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from pprint import pprint\n",
|
||||
"\n",
|
||||
"larksuite_loader = LarkSuiteDocLoader(DOMAIN, ACCESS_TOKEN, DOCUMENT_ID)\n",
|
||||
"docs = larksuite_loader.load()\n",
|
||||
"\n",
|
||||
"pprint(docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9ccc1e2f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# see https://python.langchain.com/docs/use_cases/summarization for more details\n",
|
||||
"from langchain.chains.summarize import load_summarize_chain\n",
|
||||
"\n",
|
||||
"chain = load_summarize_chain(llm, chain_type=\"map_reduce\")\n",
|
||||
"chain.run(docs)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -0,0 +1,88 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Org-mode\n",
|
||||
"\n",
|
||||
">A [Org Mode document](https://en.wikipedia.org/wiki/Org-mode) is a document editing, formatting, and organizing mode, designed for notes, planning, and authoring within the free software text editor Emacs."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## `UnstructuredOrgModeLoader`\n",
|
||||
"\n",
|
||||
"You can load data from Org-mode files with `UnstructuredOrgModeLoader` using the following workflow."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import UnstructuredOrgModeLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = UnstructuredOrgModeLoader(\n",
|
||||
" file_path=\"example_data/README.org\", mode=\"elements\"\n",
|
||||
")\n",
|
||||
"docs = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"page_content='Example Docs' metadata={'source': 'example_data/README.org', 'filename': 'README.org', 'file_directory': 'example_data', 'filetype': 'text/org', 'page_number': 1, 'category': 'Title'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(docs[0])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "5a7cc773",
|
||||
"metadata": {},
|
||||
@@ -17,7 +18,7 @@
|
||||
"\n",
|
||||
"But, the challenge is traversing the tree of child pages and actually assembling that list!\n",
|
||||
" \n",
|
||||
"We do this using the `RecusiveUrlLoader`.\n",
|
||||
"We do this using the `RecursiveUrlLoader`.\n",
|
||||
"\n",
|
||||
"This also gives us the flexibility to exclude some children (e.g., the `api` directory with > 800 child pages)."
|
||||
]
|
||||
@@ -29,10 +30,11 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders.recursive_url_loader import RecusiveUrlLoader"
|
||||
"from langchain.document_loaders.recursive_url_loader import RecursiveUrlLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "6384c057",
|
||||
"metadata": {},
|
||||
@@ -48,7 +50,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"url = 'https://js.langchain.com/docs/modules/memory/examples/'\n",
|
||||
"loader=RecusiveUrlLoader(url=url)\n",
|
||||
"loader=RecursiveUrlLoader(url=url)\n",
|
||||
"docs=loader.load()"
|
||||
]
|
||||
},
|
||||
@@ -119,6 +121,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "40fc13ef",
|
||||
"metadata": {},
|
||||
@@ -137,7 +140,7 @@
|
||||
"source": [
|
||||
"url = 'https://js.langchain.com/docs/'\n",
|
||||
"exclude_dirs=['https://js.langchain.com/docs/api/']\n",
|
||||
"loader=RecusiveUrlLoader(url=url,exclude_dirs=exclude_dirs)\n",
|
||||
"loader=RecursiveUrlLoader(url=url,exclude_dirs=exclude_dirs)\n",
|
||||
"docs=loader.load()"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -0,0 +1,419 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "213a38a2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Source Code\n",
|
||||
"\n",
|
||||
"This notebook covers how to load source code files using a special approach with language parsing: each top-level function and class in the code is loaded into separate documents. Any remaining code top-level code outside the already loaded functions and classes will be loaded into a seperate document.\n",
|
||||
"\n",
|
||||
"This approach can potentially improve the accuracy of QA models over source code. Currently, the supported languages for code parsing are Python and JavaScript. The language used for parsing can be configured, along with the minimum number of lines required to activate the splitting based on syntax."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7fa47b2e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install esprima"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "beb55c2f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import warnings\n",
|
||||
"warnings.filterwarnings('ignore')\n",
|
||||
"from pprint import pprint\n",
|
||||
"from langchain.text_splitter import Language\n",
|
||||
"from langchain.document_loaders.generic import GenericLoader\n",
|
||||
"from langchain.document_loaders.parsers import LanguageParser"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "64056e07",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = GenericLoader.from_filesystem(\n",
|
||||
" \"./example_data/source_code\",\n",
|
||||
" glob=\"*\",\n",
|
||||
" suffixes=[\".py\", \".js\"],\n",
|
||||
" parser=LanguageParser()\n",
|
||||
")\n",
|
||||
"docs = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "8af79bd7",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"6"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"len(docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "85edf3fc",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'content_type': 'functions_classes',\n",
|
||||
" 'language': <Language.PYTHON: 'python'>,\n",
|
||||
" 'source': 'example_data/source_code/example.py'}\n",
|
||||
"{'content_type': 'functions_classes',\n",
|
||||
" 'language': <Language.PYTHON: 'python'>,\n",
|
||||
" 'source': 'example_data/source_code/example.py'}\n",
|
||||
"{'content_type': 'simplified_code',\n",
|
||||
" 'language': <Language.PYTHON: 'python'>,\n",
|
||||
" 'source': 'example_data/source_code/example.py'}\n",
|
||||
"{'content_type': 'functions_classes',\n",
|
||||
" 'language': <Language.JS: 'js'>,\n",
|
||||
" 'source': 'example_data/source_code/example.js'}\n",
|
||||
"{'content_type': 'functions_classes',\n",
|
||||
" 'language': <Language.JS: 'js'>,\n",
|
||||
" 'source': 'example_data/source_code/example.js'}\n",
|
||||
"{'content_type': 'simplified_code',\n",
|
||||
" 'language': <Language.JS: 'js'>,\n",
|
||||
" 'source': 'example_data/source_code/example.js'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for document in docs:\n",
|
||||
" pprint(document.metadata)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "f44e3e37",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"class MyClass:\n",
|
||||
" def __init__(self, name):\n",
|
||||
" self.name = name\n",
|
||||
"\n",
|
||||
" def greet(self):\n",
|
||||
" print(f\"Hello, {self.name}!\")\n",
|
||||
"\n",
|
||||
"--8<--\n",
|
||||
"\n",
|
||||
"def main():\n",
|
||||
" name = input(\"Enter your name: \")\n",
|
||||
" obj = MyClass(name)\n",
|
||||
" obj.greet()\n",
|
||||
"\n",
|
||||
"--8<--\n",
|
||||
"\n",
|
||||
"# Code for: class MyClass:\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Code for: def main():\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"if __name__ == \"__main__\":\n",
|
||||
" main()\n",
|
||||
"\n",
|
||||
"--8<--\n",
|
||||
"\n",
|
||||
"class MyClass {\n",
|
||||
" constructor(name) {\n",
|
||||
" this.name = name;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" greet() {\n",
|
||||
" console.log(`Hello, ${this.name}!`);\n",
|
||||
" }\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"--8<--\n",
|
||||
"\n",
|
||||
"function main() {\n",
|
||||
" const name = prompt(\"Enter your name:\");\n",
|
||||
" const obj = new MyClass(name);\n",
|
||||
" obj.greet();\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"--8<--\n",
|
||||
"\n",
|
||||
"// Code for: class MyClass {\n",
|
||||
"\n",
|
||||
"// Code for: function main() {\n",
|
||||
"\n",
|
||||
"main();\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(\"\\n\\n--8<--\\n\\n\".join([document.page_content for document in docs]))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "69aad0ed",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The parser can be disabled for small files. \n",
|
||||
"\n",
|
||||
"The parameter `parser_threshold` indicates the minimum number of lines that the source code file must have to be segmented using the parser."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "ae024794",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = GenericLoader.from_filesystem(\n",
|
||||
" \"./example_data/source_code\",\n",
|
||||
" glob=\"*\",\n",
|
||||
" suffixes=[\".py\"],\n",
|
||||
" parser=LanguageParser(language=Language.PYTHON, parser_threshold=1000)\n",
|
||||
")\n",
|
||||
"docs = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "5d3b372a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"1"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"len(docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "89e546ad",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"class MyClass:\n",
|
||||
" def __init__(self, name):\n",
|
||||
" self.name = name\n",
|
||||
"\n",
|
||||
" def greet(self):\n",
|
||||
" print(f\"Hello, {self.name}!\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def main():\n",
|
||||
" name = input(\"Enter your name: \")\n",
|
||||
" obj = MyClass(name)\n",
|
||||
" obj.greet()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"if __name__ == \"__main__\":\n",
|
||||
" main()\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(docs[0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c9c71e61",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Splitting\n",
|
||||
"\n",
|
||||
"Additional splitting could be needed for those functions, classes, or scripts that are too big."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "adbaa79f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = GenericLoader.from_filesystem(\n",
|
||||
" \"./example_data/source_code\",\n",
|
||||
" glob=\"*\",\n",
|
||||
" suffixes=[\".js\"],\n",
|
||||
" parser=LanguageParser(language=Language.JS)\n",
|
||||
")\n",
|
||||
"docs = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "c44c0d3f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.text_splitter import (\n",
|
||||
" RecursiveCharacterTextSplitter,\n",
|
||||
" Language,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "b1e0053d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"js_splitter = RecursiveCharacterTextSplitter.from_language(\n",
|
||||
" language=Language.JS, chunk_size=60, chunk_overlap=0\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "7dbe6188",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"result = js_splitter.split_documents(docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "8a80d089",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"7"
|
||||
]
|
||||
},
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"len(result)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "000a6011",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"class MyClass {\n",
|
||||
" constructor(name) {\n",
|
||||
" this.name = name;\n",
|
||||
"\n",
|
||||
"--8<--\n",
|
||||
"\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"--8<--\n",
|
||||
"\n",
|
||||
"greet() {\n",
|
||||
" console.log(`Hello, ${this.name}!`);\n",
|
||||
" }\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"--8<--\n",
|
||||
"\n",
|
||||
"function main() {\n",
|
||||
" const name = prompt(\"Enter your name:\");\n",
|
||||
"\n",
|
||||
"--8<--\n",
|
||||
"\n",
|
||||
"const obj = new MyClass(name);\n",
|
||||
" obj.greet();\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"--8<--\n",
|
||||
"\n",
|
||||
"// Code for: class MyClass {\n",
|
||||
"\n",
|
||||
"// Code for: function main() {\n",
|
||||
"\n",
|
||||
"--8<--\n",
|
||||
"\n",
|
||||
"main();\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(\"\\n\\n--8<--\\n\\n\".join([document.page_content for document in result]))"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.16"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -0,0 +1,116 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a634365e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Tencent COS Directory\n",
|
||||
"\n",
|
||||
"This covers how to load document objects from a `Tencent COS Directory`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "85e97267",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#! pip install cos-python-sdk-v5"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "2f0cd6a5",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import TencentCOSDirectoryLoader\n",
|
||||
"from qcloud_cos import CosConfig"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "321cc7f1",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"conf = CosConfig(\n",
|
||||
" Region=\"your cos region\",\n",
|
||||
" SecretId=\"your cos secret_id\",\n",
|
||||
" SecretKey=\"your cos secret_key\",\n",
|
||||
" )\n",
|
||||
"loader = TencentCOSDirectoryLoader(conf=conf, bucket=\"you_cos_bucket\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4c50d2c7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0690c40a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Specifying a prefix\n",
|
||||
"You can also specify a prefix for more finegrained control over what files to load."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "72d44781",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = TencentCOSDirectoryLoader(conf=conf, bucket=\"you_cos_bucket\", prefix=\"fake\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2d3c32db",
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader.load()"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -0,0 +1,91 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a634365e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Tencent COS File\n",
|
||||
"\n",
|
||||
"This covers how to load document object from a `Tencent COS File`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "85e97267",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#! pip install cos-python-sdk-v5"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "2f0cd6a5",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import TencentCOSFileLoader\n",
|
||||
"from qcloud_cos import CosConfig"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "321cc7f1",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"conf = CosConfig(\n",
|
||||
" Region=\"your cos region\",\n",
|
||||
" SecretId=\"your cos secret_id\",\n",
|
||||
" SecretKey=\"your cos secret_key\",\n",
|
||||
" )\n",
|
||||
"loader = TencentCOSFileLoader(conf=conf, bucket=\"you_cos_bucket\", key=\"fake.docx\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4c50d2c7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0690c40a",
|
||||
"metadata": {},
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -226,7 +226,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "8de9ef16",
|
||||
"metadata": {},
|
||||
@@ -303,7 +302,7 @@
|
||||
"source": [
|
||||
"## Unstructured API\n",
|
||||
"\n",
|
||||
"If you want to get up and running with less set up, you can simply run `pip install unstructured` and use `UnstructuredAPIFileLoader` or `UnstructuredAPIFileIOLoader`. That will process your document using the hosted Unstructured API. Note that currently (as of 11 May 2023) the Unstructured API is open, but it will soon require an API. The [Unstructured documentation](https://unstructured-io.github.io/) page will have instructions on how to generate an API key once they’re available. Check out the instructions [here](https://github.com/Unstructured-IO/unstructured-api#dizzy-instructions-for-using-the-docker-image) if you’d like to self-host the Unstructured API or run it locally."
|
||||
"If you want to get up and running with less set up, you can simply run `pip install unstructured` and use `UnstructuredAPIFileLoader` or `UnstructuredAPIFileIOLoader`. That will process your document using the hosted Unstructured API. You can generate a free Unstructured API key [here](https://www.unstructured.io/api-key/). The [Unstructured documentation](https://unstructured-io.github.io/) page will have instructions on how to generate an API key once they’re available. Check out the instructions [here](https://github.com/Unstructured-IO/unstructured-api#dizzy-instructions-for-using-the-docker-image) if you’d like to self-host the Unstructured API or run it locally."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
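As a quick illustration of the hosted-API path described above, here is a minimal sketch of `UnstructuredAPIFileLoader` usage; the file path and the key placeholder are hypothetical and should be replaced with your own values:

```python
from langchain.document_loaders import UnstructuredAPIFileLoader

# Hypothetical file path and placeholder API key.
loader = UnstructuredAPIFileLoader(
    "example_data/fake.docx",
    api_key="MY_UNSTRUCTURED_API_KEY",
)
docs = loader.load()
```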
@@ -224,13 +224,33 @@
|
||||
"docs"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"## Using proxies\n",
|
||||
"\n",
|
||||
"Sometimes you might need to use proxies to get around IP blocks. You can pass in a dictionary of proxies to the loader (and `requests` underneath) to use them."
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1dd8ab23",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
"source": [
|
||||
"loader = WebBaseLoader(\n",
|
||||
" \"https://www.walmart.com/search?q=parrots\", proxies={\n",
|
||||
" \"http\": \"http://{username}:{password}:@proxy.service.com:6666/\",\n",
|
||||
" \"https\": \"https://{username}:{password}:@proxy.service.com:6666/\"\n",
|
||||
" }\n",
|
||||
")\n",
|
||||
"docs = loader.load()\n"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
@@ -0,0 +1,214 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8cc82b48",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# MultiQueryRetriever\n",
|
||||
"\n",
|
||||
"Distance-based vector database retrieval embeds (represents) queries in high-dimensional space and finds similar embedded documents based on \"distance\". But, retrieval may produce difference results with subtle changes in query wording or if the embeddings do not capture the semantics of the data well. Prompt engineering / tuning is sometimes done to manually address these problems, but can be tedious.\n",
|
||||
"\n",
|
||||
"The `MultiQueryRetriever` automates the process of prompt tuning by using an LLM to generate multiple queries from different perspectives for a given user input query. For each query, it retrieves a set of relevant documents and takes the unique union across all queries to get a larger set of potentially relevant documents. By generating multiple perspectives on the same question, the `MultiQueryRetriever` might be able to overcome some of the limitations of the distance-based retrieval and get a richer set of results."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "c2f3f5f2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Build a sample vectorDB\n",
|
||||
"from langchain.vectorstores import Chroma\n",
|
||||
"from langchain.document_loaders import PyPDFLoader\n",
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
|
||||
"\n",
|
||||
"# Load PDF\n",
|
||||
"path=\"path-to-files\"\n",
|
||||
"loaders = [\n",
|
||||
" PyPDFLoader(path+\"docs/cs229_lectures/MachineLearning-Lecture01.pdf\"),\n",
|
||||
" PyPDFLoader(path+\"docs/cs229_lectures/MachineLearning-Lecture02.pdf\"),\n",
|
||||
" PyPDFLoader(path+\"docs/cs229_lectures/MachineLearning-Lecture03.pdf\")\n",
|
||||
"]\n",
|
||||
"docs = []\n",
|
||||
"for loader in loaders:\n",
|
||||
" docs.extend(loader.load())\n",
|
||||
" \n",
|
||||
"# Split\n",
|
||||
"text_splitter = RecursiveCharacterTextSplitter(chunk_size = 1500,chunk_overlap = 150)\n",
|
||||
"splits = text_splitter.split_documents(docs)\n",
|
||||
"\n",
|
||||
"# VectorDB\n",
|
||||
"embedding = OpenAIEmbeddings()\n",
|
||||
"vectordb = Chroma.from_documents(documents=splits,embedding=embedding)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "cca8f56c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"`Simple usage`\n",
|
||||
"\n",
|
||||
"Specify the LLM to use for query generation, and the retriver will do the rest."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "edbca101",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.retrievers.multi_query import MultiQueryRetriever\n",
|
||||
"question=\"What does the course say about regression?\"\n",
|
||||
"num_queries=3\n",
|
||||
"llm = ChatOpenAI(temperature=0)\n",
|
||||
"retriever_from_llm = MultiQueryRetriever.from_llm(retriever=vectordb.as_retriever(),llm=llm)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "e5203612",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:root:Generated queries: [\"1. What is the course's perspective on regression?\", '2. How does the course discuss regression?', '3. What information does the course provide about regression?']\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"6"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"unique_docs = retriever_from_llm.get_relevant_documents(question=\"What does the course say about regression?\")\n",
|
||||
"len(unique_docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c54a282f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"`Supplying your own prompt`\n",
|
||||
"\n",
|
||||
"You can also supply a prompt along with an output parser to split the results into a list of queries."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "d9afb0ca",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"from langchain import LLMChain\n",
|
||||
"from pydantic import BaseModel, Field\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain.output_parsers import PydanticOutputParser\n",
|
||||
"\n",
|
||||
"# Output parser will split the LLM result into a list of queries\n",
|
||||
"class LineList(BaseModel):\n",
|
||||
" # \"lines\" is the key (attribute name) of the parsed output\n",
|
||||
" lines: List[str] = Field(description=\"Lines of text\")\n",
|
||||
"\n",
|
||||
"class LineListOutputParser(PydanticOutputParser):\n",
|
||||
" def __init__(self) -> None:\n",
|
||||
" super().__init__(pydantic_object=LineList)\n",
|
||||
" def parse(self, text: str) -> LineList:\n",
|
||||
" lines = text.strip().split(\"\\n\")\n",
|
||||
" return LineList(lines=lines)\n",
|
||||
"\n",
|
||||
"output_parser = LineListOutputParser()\n",
|
||||
" \n",
|
||||
"QUERY_PROMPT = PromptTemplate(\n",
|
||||
" input_variables=[\"question\"],\n",
|
||||
" template=\"\"\"You are an AI language model assistant. Your task is to generate five \n",
|
||||
" different versions of the given user question to retrieve relevant documents from a vector \n",
|
||||
" database. By generating multiple perspectives on the user question, your goal is to help\n",
|
||||
" the user overcome some of the limitations of the distance-based similarity search. \n",
|
||||
" Provide these alternative questions seperated by newlines.\n",
|
||||
" Original question: {question}\"\"\",\n",
|
||||
")\n",
|
||||
"llm = ChatOpenAI(temperature=0)\n",
|
||||
"\n",
|
||||
"# Chain\n",
|
||||
"llm_chain = LLMChain(llm=llm,prompt=QUERY_PROMPT,output_parser=output_parser)\n",
|
||||
" \n",
|
||||
"# Other inputs\n",
|
||||
"question=\"What does the course say about regression?\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "6660d7ee",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:root:Generated queries: [\"1. What is the course's perspective on regression?\", '2. Can you provide information on regression as discussed in the course?', '3. How does the course cover the topic of regression?', \"4. What are the course's teachings on regression?\", '5. In relation to the course, what is mentioned about regression?']\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"8"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Run\n",
|
||||
"retriever = MultiQueryRetriever(retriever=vectordb.as_retriever(), \n",
|
||||
" llm_chain=llm_chain,\n",
|
||||
" parser_key=\"lines\") # \"lines\" is the key (attribute name) of the parsed output\n",
|
||||
"\n",
|
||||
"# Results\n",
|
||||
"unique_docs = retriever.get_relevant_documents(question=\"What does the course say about regression?\")\n",
|
||||
"len(unique_docs)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.16"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -58,6 +58,7 @@
|
||||
"from langchain.memory.chat_message_histories import ZepChatMessageHistory\n",
|
||||
"from langchain.schema import HumanMessage, AIMessage\n",
|
||||
"from uuid import uuid4\n",
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"# Set this to your Zep server URL\n",
|
||||
"ZEP_API_URL = \"http://localhost:8000\""
|
||||
@@ -75,6 +76,25 @@
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdin",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
" ········\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Provide your Zep API key. Note that this is optional. See https://docs.getzep.com/deployment/auth\n",
|
||||
"\n",
|
||||
"zep_api_key = getpass.getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-25T15:03:29.118416Z",
|
||||
@@ -93,12 +113,13 @@
|
||||
"zep_chat_history = ZepChatMessageHistory(\n",
|
||||
" session_id=session_id,\n",
|
||||
" url=ZEP_API_URL,\n",
|
||||
" api_key=zep_api_key\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 5,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-25T15:03:30.271181Z",
|
||||
@@ -172,7 +193,7 @@
|
||||
"]\n",
|
||||
"\n",
|
||||
"for msg in test_history:\n",
|
||||
" zep_chat_history.append(\n",
|
||||
" zep_chat_history.add_message(\n",
|
||||
" HumanMessage(content=msg[\"content\"])\n",
|
||||
" if msg[\"role\"] == \"human\"\n",
|
||||
" else AIMessage(content=msg[\"content\"])\n",
|
||||
@@ -192,7 +213,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 6,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-25T15:03:32.979155Z",
|
||||
@@ -207,14 +228,14 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='Who was Octavia Butler?', metadata={'score': 0.7759001673780126, 'uuid': '3a82a02f-056e-4c6a-b960-67ebdf3b2b93', 'created_at': '2023-05-25T15:03:30.2041Z', 'role': 'human', 'token_count': 8}),\n",
|
||||
" Document(page_content=\"Octavia Butler's contemporaries included Ursula K. Le Guin, Samuel R. Delany, and Joanna Russ.\", metadata={'score': 0.7602262941130749, 'uuid': 'a2fc9c21-0897-46c8-bef7-6f5c0f71b04a', 'created_at': '2023-05-25T15:03:30.248065Z', 'role': 'ai', 'token_count': 27}),\n",
|
||||
" Document(page_content='Who were her contemporaries?', metadata={'score': 0.757553366415519, 'uuid': '41f9c41a-a205-41e1-b48b-a0a4cd943fc8', 'created_at': '2023-05-25T15:03:30.243995Z', 'role': 'human', 'token_count': 8}),\n",
|
||||
" Document(page_content='Octavia Estelle Butler (June 22, 1947 – February 24, 2006) was an American science fiction author.', metadata={'score': 0.7546211059317948, 'uuid': '34678311-0098-4f1a-8fd4-5615ac692deb', 'created_at': '2023-05-25T15:03:30.231427Z', 'role': 'ai', 'token_count': 31}),\n",
|
||||
" Document(page_content='Which books of hers were made into movies?', metadata={'score': 0.7496714959247069, 'uuid': '18046c3a-9666-4d3e-b4f0-43d1394732b7', 'created_at': '2023-05-25T15:03:30.236837Z', 'role': 'human', 'token_count': 11})]"
|
||||
"[Document(page_content='Parable of the Sower is a science fiction novel by Octavia Butler, published in 1993. It follows the story of Lauren Olamina, a young woman living in a dystopian future where society has collapsed due to environmental disasters, poverty, and violence.', metadata={'score': 0.8897116216176073, 'uuid': 'db60ff57-f259-4ec4-8a81-178ed4c6e54f', 'created_at': '2023-06-26T23:40:22.816214Z', 'role': 'ai', 'metadata': {'system': {'entities': [{'Label': 'GPE', 'Matches': [{'End': 20, 'Start': 15, 'Text': 'Sower'}], 'Name': 'Sower'}, {'Label': 'PERSON', 'Matches': [{'End': 65, 'Start': 51, 'Text': 'Octavia Butler'}], 'Name': 'Octavia Butler'}, {'Label': 'DATE', 'Matches': [{'End': 84, 'Start': 80, 'Text': '1993'}], 'Name': '1993'}, {'Label': 'PERSON', 'Matches': [{'End': 124, 'Start': 110, 'Text': 'Lauren Olamina'}], 'Name': 'Lauren Olamina'}]}}, 'token_count': 56}),\n",
|
||||
" Document(page_content=\"Write a short synopsis of Butler's book, Parable of the Sower. What is it about?\", metadata={'score': 0.8856661080361157, 'uuid': 'f1a5981a-8f6d-4168-a548-6e9c32f35fa1', 'created_at': '2023-06-26T23:40:22.809621Z', 'role': 'human', 'metadata': {'system': {'entities': [{'Label': 'ORG', 'Matches': [{'End': 32, 'Start': 26, 'Text': 'Butler'}], 'Name': 'Butler'}, {'Label': 'WORK_OF_ART', 'Matches': [{'End': 61, 'Start': 41, 'Text': 'Parable of the Sower'}], 'Name': 'Parable of the Sower'}]}}, 'token_count': 23}),\n",
|
||||
" Document(page_content='Who was Octavia Butler?', metadata={'score': 0.7757595298492976, 'uuid': '361d0043-1009-4e13-a7f0-8aea8b1ee869', 'created_at': '2023-06-26T23:40:22.709886Z', 'role': 'human', 'metadata': {'system': {'entities': [{'Label': 'PERSON', 'Matches': [{'End': 22, 'Start': 8, 'Text': 'Octavia Butler'}], 'Name': 'Octavia Butler'}], 'intent': 'The subject wants to know about the identity or background of an individual named Octavia Butler.'}}, 'token_count': 8}),\n",
|
||||
" Document(page_content=\"Octavia Butler's contemporaries included Ursula K. Le Guin, Samuel R. Delany, and Joanna Russ.\", metadata={'score': 0.7601242516059306, 'uuid': '56c45e8a-0f65-45f0-bc46-d9e65164b563', 'created_at': '2023-06-26T23:40:22.778836Z', 'role': 'ai', 'metadata': {'system': {'entities': [{'Label': 'PERSON', 'Matches': [{'End': 16, 'Start': 0, 'Text': \"Octavia Butler's\"}], 'Name': \"Octavia Butler's\"}, {'Label': 'ORG', 'Matches': [{'End': 58, 'Start': 41, 'Text': 'Ursula K. Le Guin'}], 'Name': 'Ursula K. Le Guin'}, {'Label': 'PERSON', 'Matches': [{'End': 76, 'Start': 60, 'Text': 'Samuel R. Delany'}], 'Name': 'Samuel R. Delany'}, {'Label': 'PERSON', 'Matches': [{'End': 93, 'Start': 82, 'Text': 'Joanna Russ'}], 'Name': 'Joanna Russ'}], 'intent': \"The subject is providing information about Octavia Butler's contemporaries.\"}}, 'token_count': 27}),\n",
|
||||
" Document(page_content='You might want to read Ursula K. Le Guin or Joanna Russ.', metadata={'score': 0.7594731095320668, 'uuid': '6951f2fd-dfa4-4e05-9380-f322ef8f72f8', 'created_at': '2023-06-26T23:40:22.80464Z', 'role': 'ai', 'metadata': {'system': {'entities': [{'Label': 'ORG', 'Matches': [{'End': 40, 'Start': 23, 'Text': 'Ursula K. Le Guin'}], 'Name': 'Ursula K. Le Guin'}, {'Label': 'PERSON', 'Matches': [{'End': 55, 'Start': 44, 'Text': 'Joanna Russ'}], 'Name': 'Joanna Russ'}]}}, 'token_count': 18})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -226,6 +247,7 @@
|
||||
" session_id=session_id, # Ensure that you provide the session_id when instantiating the Retriever\n",
|
||||
" url=ZEP_API_URL,\n",
|
||||
" top_k=5,\n",
|
||||
" api_key=zep_api_key\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"await zep_retriever.aget_relevant_documents(\"Who wrote Parable of the Sower?\")"
|
||||
@@ -240,7 +262,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 7,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-25T15:03:34.713354Z",
|
||||
@@ -255,14 +277,14 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='Parable of the Sower is a science fiction novel by Octavia Butler, published in 1993. It follows the story of Lauren Olamina, a young woman living in a dystopian future where society has collapsed due to environmental disasters, poverty, and violence.', metadata={'score': 0.8897321402776546, 'uuid': '1c09603a-52c1-40d7-9d69-29f26256029c', 'created_at': '2023-05-25T15:03:30.268257Z', 'role': 'ai', 'token_count': 56}),\n",
|
||||
" Document(page_content=\"Write a short synopsis of Butler's book, Parable of the Sower. What is it about?\", metadata={'score': 0.8857628682610436, 'uuid': 'f6706e8c-6c91-452f-8c1b-9559fd924657', 'created_at': '2023-05-25T15:03:30.265302Z', 'role': 'human', 'token_count': 23}),\n",
|
||||
" Document(page_content='Who was Octavia Butler?', metadata={'score': 0.7759670375149477, 'uuid': '3a82a02f-056e-4c6a-b960-67ebdf3b2b93', 'created_at': '2023-05-25T15:03:30.2041Z', 'role': 'human', 'token_count': 8}),\n",
|
||||
" Document(page_content=\"Octavia Butler's contemporaries included Ursula K. Le Guin, Samuel R. Delany, and Joanna Russ.\", metadata={'score': 0.7602854653476563, 'uuid': 'a2fc9c21-0897-46c8-bef7-6f5c0f71b04a', 'created_at': '2023-05-25T15:03:30.248065Z', 'role': 'ai', 'token_count': 27}),\n",
|
||||
" Document(page_content='You might want to read Ursula K. Le Guin or Joanna Russ.', metadata={'score': 0.7595293992240313, 'uuid': 'f22f2498-6118-4c74-8718-aa89ccd7e3d6', 'created_at': '2023-05-25T15:03:30.261198Z', 'role': 'ai', 'token_count': 18})]"
|
||||
"[Document(page_content='Parable of the Sower is a science fiction novel by Octavia Butler, published in 1993. It follows the story of Lauren Olamina, a young woman living in a dystopian future where society has collapsed due to environmental disasters, poverty, and violence.', metadata={'score': 0.889661105796371, 'uuid': 'db60ff57-f259-4ec4-8a81-178ed4c6e54f', 'created_at': '2023-06-26T23:40:22.816214Z', 'role': 'ai', 'metadata': {'system': {'entities': [{'Label': 'GPE', 'Matches': [{'End': 20, 'Start': 15, 'Text': 'Sower'}], 'Name': 'Sower'}, {'Label': 'PERSON', 'Matches': [{'End': 65, 'Start': 51, 'Text': 'Octavia Butler'}], 'Name': 'Octavia Butler'}, {'Label': 'DATE', 'Matches': [{'End': 84, 'Start': 80, 'Text': '1993'}], 'Name': '1993'}, {'Label': 'PERSON', 'Matches': [{'End': 124, 'Start': 110, 'Text': 'Lauren Olamina'}], 'Name': 'Lauren Olamina'}]}}, 'token_count': 56}),\n",
|
||||
" Document(page_content=\"Write a short synopsis of Butler's book, Parable of the Sower. What is it about?\", metadata={'score': 0.885754241595424, 'uuid': 'f1a5981a-8f6d-4168-a548-6e9c32f35fa1', 'created_at': '2023-06-26T23:40:22.809621Z', 'role': 'human', 'metadata': {'system': {'entities': [{'Label': 'ORG', 'Matches': [{'End': 32, 'Start': 26, 'Text': 'Butler'}], 'Name': 'Butler'}, {'Label': 'WORK_OF_ART', 'Matches': [{'End': 61, 'Start': 41, 'Text': 'Parable of the Sower'}], 'Name': 'Parable of the Sower'}]}}, 'token_count': 23}),\n",
|
||||
" Document(page_content='Who was Octavia Butler?', metadata={'score': 0.7758688965570713, 'uuid': '361d0043-1009-4e13-a7f0-8aea8b1ee869', 'created_at': '2023-06-26T23:40:22.709886Z', 'role': 'human', 'metadata': {'system': {'entities': [{'Label': 'PERSON', 'Matches': [{'End': 22, 'Start': 8, 'Text': 'Octavia Butler'}], 'Name': 'Octavia Butler'}], 'intent': 'The subject wants to know about the identity or background of an individual named Octavia Butler.'}}, 'token_count': 8}),\n",
|
||||
" Document(page_content=\"Octavia Butler's contemporaries included Ursula K. Le Guin, Samuel R. Delany, and Joanna Russ.\", metadata={'score': 0.7602672137411663, 'uuid': '56c45e8a-0f65-45f0-bc46-d9e65164b563', 'created_at': '2023-06-26T23:40:22.778836Z', 'role': 'ai', 'metadata': {'system': {'entities': [{'Label': 'PERSON', 'Matches': [{'End': 16, 'Start': 0, 'Text': \"Octavia Butler's\"}], 'Name': \"Octavia Butler's\"}, {'Label': 'ORG', 'Matches': [{'End': 58, 'Start': 41, 'Text': 'Ursula K. Le Guin'}], 'Name': 'Ursula K. Le Guin'}, {'Label': 'PERSON', 'Matches': [{'End': 76, 'Start': 60, 'Text': 'Samuel R. Delany'}], 'Name': 'Samuel R. Delany'}, {'Label': 'PERSON', 'Matches': [{'End': 93, 'Start': 82, 'Text': 'Joanna Russ'}], 'Name': 'Joanna Russ'}], 'intent': \"The subject is providing information about Octavia Butler's contemporaries.\"}}, 'token_count': 27}),\n",
|
||||
" Document(page_content='You might want to read Ursula K. Le Guin or Joanna Russ.', metadata={'score': 0.7596040989115522, 'uuid': '6951f2fd-dfa4-4e05-9380-f322ef8f72f8', 'created_at': '2023-06-26T23:40:22.80464Z', 'role': 'ai', 'metadata': {'system': {'entities': [{'Label': 'ORG', 'Matches': [{'End': 40, 'Start': 23, 'Text': 'Ursula K. Le Guin'}], 'Name': 'Ursula K. Le Guin'}, {'Label': 'PERSON', 'Matches': [{'End': 55, 'Start': 44, 'Text': 'Joanna Russ'}], 'Name': 'Joanna Russ'}]}}, 'token_count': 18})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -304,7 +326,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -2,28 +2,34 @@
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Alibaba Cloud OpenSearch\n",
|
||||
"\n",
|
||||
">[Alibaba Cloud Opensearch](https://www.alibabacloud.com/product/opensearch) OpenSearch is a one-stop platform to develop intelligent search services. OpenSearch was built based on the large-scale distributed search engine developed by Alibaba. OpenSearch serves more than 500 business cases in Alibaba Group and thousands of Alibaba Cloud customers. OpenSearch helps develop search services in different search scenarios, including e-commerce, O2O, multimedia, the content industry, communities and forums, and big data query in enterprises.\n",
|
||||
">[Alibaba Cloud Opensearch](https://www.alibabacloud.com/product/opensearch) is a one-stop platform to develop intelligent search services. `OpenSearch` was built on the large-scale distributed search engine developed by `Alibaba`. `OpenSearch` serves more than 500 business cases in Alibaba Group and thousands of Alibaba Cloud customers. `OpenSearch` helps develop search services in different search scenarios, including e-commerce, O2O, multimedia, the content industry, communities and forums, and big data query in enterprises.\n",
|
||||
"\n",
|
||||
">OpenSearch helps you develop high quality, maintenance-free, and high performance intelligent search services to provide your users with high search efficiency and accuracy.\n",
|
||||
">`OpenSearch` helps you develop high quality, maintenance-free, and high performance intelligent search services to provide your users with high search efficiency and accuracy.\n",
|
||||
"\n",
|
||||
">OpenSearch provides the vector search feature. In specific scenarios, especially test question search and image search scenarios, you can use the vector search feature together with the multimodal search feature to improve the accuracy of search results. This topic describes the syntax and usage notes of vector indexes.\n",
|
||||
">`OpenSearch` provides the vector search feature. In specific scenarios, especially test question search and image search scenarios, you can use the vector search feature together with the multimodal search feature to improve the accuracy of search results. This topic describes the syntax and usage notes of vector indexes.\n",
|
||||
"\n",
|
||||
"This notebook shows how to use functionality related to the `Alibaba Cloud OpenSearch Vector Search Edition`.\n",
|
||||
"To run, you should have an [OpenSearch Vector Search Edition](https://opensearch.console.aliyun.com) instance up and running:\n",
|
||||
"- Read the [help document](https://www.alibabacloud.com/help/en/opensearch/latest/vector-search) to quickly familiarize and configure OpenSearch Vector Search Edition instance.\n"
|
||||
"\n",
|
||||
"Read the [help document](https://www.alibabacloud.com/help/en/opensearch/latest/vector-search) to quickly familiarize and configure OpenSearch Vector Search Edition instance.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#!pip install alibabacloud-ha3engine"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"After completing the configuration, follow these steps to connect to the instance, index documents, and perform vector retrieval."
|
||||
]
|
||||
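The hunks below only touch cell metadata, so the connection and indexing code itself is elided from this diff. As a rough sketch of what those steps look like: the settings field names and all placeholder values below are assumptions drawn from the `AlibabaCloudOpenSearchSettings` object this integration exposes, so check the help document linked above for the authoritative list:

```python
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import (
    AlibabaCloudOpenSearch,
    AlibabaCloudOpenSearchSettings,
)

# All values are hypothetical placeholders taken from the OpenSearch
# Vector Search Edition console of your own instance.
settings = AlibabaCloudOpenSearchSettings(
    endpoint="your-opensearch-endpoint",
    instance_id="your-instance-id",
    datasource_name="your-datasource-name",
    username="your-username",
    password="your-password",
    embedding_index_name="your-vector-index-name",
    field_name_mapping={"id": "id", "document": "document", "embedding": "embedding"},
)

# Connect the vector store with an embedding model.
embeddings = OpenAIEmbeddings()
opensearch = AlibabaCloudOpenSearch(embedding=embeddings, config=settings)
```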
@@ -33,6 +39,9 @@
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
},
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
@@ -49,9 +58,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Split documents and get embeddings by call OpenAI API"
|
||||
]
|
||||
@@ -61,6 +68,9 @@
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
},
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
@@ -80,7 +90,6 @@
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%% md\n"
|
||||
}
|
||||
@@ -94,6 +103,9 @@
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
},
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
@@ -133,9 +145,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Create an opensearch access instance by settings."
|
||||
]
|
||||
@@ -145,6 +155,9 @@
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
},
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
@@ -159,9 +172,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"or"
|
||||
]
|
||||
@@ -171,6 +182,9 @@
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
},
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
@@ -183,9 +197,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Add texts and build index."
|
||||
]
|
||||
@@ -195,6 +207,9 @@
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
},
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
@@ -208,9 +223,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Query and retrieve data."
|
||||
]
|
||||
@@ -220,6 +233,9 @@
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
},
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
@@ -233,9 +249,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Query and retrieve data with metadata\n"
|
||||
]
|
||||
@@ -245,6 +259,9 @@
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
},
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
@@ -260,7 +277,6 @@
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%% md\n"
|
||||
}
|
||||
@@ -272,23 +288,23 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 2
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython2",
|
||||
"version": "2.7.6"
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
|
||||
@@ -6,8 +6,9 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# AwaDB\n",
|
||||
"[AwaDB](https://github.com/awa-ai/awadb) is an AI Native database for the search and storage of embedding vectors used by LLM Applications.\n",
|
||||
"This notebook shows how to use functionality related to the AwaDB."
|
||||
">[AwaDB](https://github.com/awa-ai/awadb) is an AI Native database for the search and storage of embedding vectors used by LLM Applications.\n",
|
||||
"\n",
|
||||
"This notebook shows how to use functionality related to the `AwaDB`."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -184,7 +185,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.1"
|
||||
"version": "3.10.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -1,19 +1,19 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Azure Cognitive Search"
|
||||
"# Azure Cognitive Search\n",
|
||||
"\n",
|
||||
">[Azure Cognitive Search](https://learn.microsoft.com/en-us/azure/search/search-what-is-azure-search) (formerly known as `Azure Search`) is a cloud search service that gives developers infrastructure, APIs, and tools for building a rich search experience over private, heterogeneous content in web, mobile, and enterprise applications.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Install Azure Cognitive Search SDK"
|
||||
"## Install Azure Cognitive Search SDK"
|
||||
]
|
||||
},
|
||||
{
|
||||
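Since this hunk only reorganizes the intro and heading cells, the store construction itself is not shown. A minimal sketch of creating an `AzureSearch` vector store, assuming the endpoint and admin key are supplied via environment variables (the variable names and index name are illustrative):

```python
import os

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.azuresearch import AzureSearch

# Hypothetical environment-variable names and index name.
embeddings = OpenAIEmbeddings()
vector_store = AzureSearch(
    azure_search_endpoint=os.environ["AZURE_SEARCH_ENDPOINT"],
    azure_search_key=os.environ["AZURE_SEARCH_KEY"],
    index_name="langchain-vector-demo",
    embedding_function=embeddings.embed_query,
)
```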
@@ -27,7 +27,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -44,12 +43,10 @@
|
||||
"import openai\n",
|
||||
"from dotenv import load_dotenv\n",
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
"from langchain.schema import BaseRetriever\n",
|
||||
"from langchain.vectorstores.azuresearch import AzureSearch"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -74,7 +71,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -95,7 +91,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -120,7 +115,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -148,7 +142,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -187,7 +180,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -226,7 +218,7 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.9.13 ('.venv': venv)",
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
@@ -240,9 +232,8 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
"version": "3.10.6"
|
||||
},
|
||||
"orig_nbformat": 4,
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "645053d6307d413a1a75681b5ebb6449bb2babba4bcb0bf65a1ddc3dbefb108a"
|
||||
@@ -250,5 +241,5 @@
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
|
||||
@@ -23,7 +23,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install \"cassio>=0.0.5\""
|
||||
"!pip install \"cassio>=0.0.7\""
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -44,14 +44,16 @@
|
||||
"import os\n",
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"database_mode = (input('\\n(L)ocal Cassandra or (A)stra DB? ')).upper()\n",
|
||||
"database_mode = (input('\\n(C)assandra or (A)stra DB? ')).upper()\n",
|
||||
"\n",
|
||||
"keyspace_name = input('\\nKeyspace name? ')\n",
|
||||
"\n",
|
||||
"if database_mode == 'A':\n",
|
||||
" ASTRA_DB_APPLICATION_TOKEN = getpass.getpass('\\nAstra DB Token (\"AstraCS:...\") ')\n",
|
||||
" #\n",
|
||||
" ASTRA_DB_SECURE_BUNDLE_PATH = input('Full path to your Secure Connect Bundle? ')"
|
||||
" ASTRA_DB_SECURE_BUNDLE_PATH = input('Full path to your Secure Connect Bundle? ')\n",
|
||||
"elif database_mode == 'C':\n",
|
||||
" CASSANDRA_CONTACT_POINTS = input('Contact points? (comma-separated, empty for localhost) ').strip()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -72,8 +74,15 @@
|
||||
"from cassandra.cluster import Cluster\n",
|
||||
"from cassandra.auth import PlainTextAuthProvider\n",
|
||||
"\n",
|
||||
"if database_mode == 'L':\n",
|
||||
" cluster = Cluster()\n",
|
||||
"if database_mode == 'C':\n",
|
||||
" if CASSANDRA_CONTACT_POINTS:\n",
|
||||
" cluster = Cluster([\n",
|
||||
" cp.strip()\n",
|
||||
" for cp in CASSANDRA_CONTACT_POINTS.split(',')\n",
|
||||
" if cp.strip()\n",
|
||||
" ])\n",
|
||||
" else:\n",
|
||||
" cluster = Cluster()\n",
|
||||
" session = cluster.connect()\n",
|
||||
"elif database_mode == 'A':\n",
|
||||
" ASTRA_DB_CLIENT_ID = \"token\"\n",
|
||||
@@ -261,7 +270,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.10"
|
||||
"version": "3.10.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -9,20 +9,6 @@
|
||||
"\n",
|
||||
">[Chroma](https://docs.trychroma.com/getting-started) is a AI-native open-source vector database focused on developer productivity and happiness. Chroma is licensed under Apache 2.0.\n",
|
||||
"\n",
|
||||
"<a href=\"https://discord.gg/MMeYNTmh3x\" target=\"_blank\">\n",
|
||||
" <img src=\"https://img.shields.io/discord/1073293645303795742\" alt=\"Discord\" />\n",
|
||||
"</a> \n",
|
||||
"<a href=\"https://github.com/chroma-core/chroma/blob/master/LICENSE\" target=\"_blank\">\n",
|
||||
" <img src=\"https://img.shields.io/static/v1?label=license&message=Apache 2.0&color=white\" alt=\"License\" />\n",
|
||||
"</a> \n",
|
||||
"<img src=\"https://github.com/chroma-core/chroma/actions/workflows/chroma-integration-test.yml/badge.svg?branch=main\" alt=\"Integration Tests\" />\n",
|
||||
"\n",
|
||||
"- [Website](https://www.trychroma.com/)\n",
|
||||
"- [Documentation](https://docs.trychroma.com/)\n",
|
||||
"- [Twitter](https://twitter.com/trychroma)\n",
|
||||
"- [Discord](https://discord.gg/MMeYNTmh3x)\n",
|
||||
"\n",
|
||||
"Chroma is fully-typed, fully-tested and fully-documented.\n",
|
||||
"\n",
|
||||
"Install Chroma with:\n",
|
||||
"\n",
|
||||
@@ -47,19 +33,6 @@
|
||||
"View full docs at [docs](https://docs.trychroma.com/reference/Collection). To access these methods directly, you can do `._collection_.method()`\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "12e83df7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# first install dependencies\n",
|
||||
"!pip install langchain\n",
|
||||
"!pip install langchainplus_sdk\n",
|
||||
"!pip install chromadb\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b5ffbf8",
|
||||
@@ -576,7 +549,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
"version": "3.10.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||

@@ -0,0 +1,399 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "683953b3",
"metadata": {},
"source": [
"# ClickHouse Vector Search\n",
"\n",
"> [ClickHouse](https://clickhouse.com/) is the fastest and most resource-efficient open-source database for real-time apps and analytics, with full SQL support and a wide range of functions to assist users in writing analytical queries. Recently added data structures and distance search functions (like `L2Distance`), as well as [approximate nearest neighbor search indexes](https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/annindexes), enable ClickHouse to be used as a high-performance and scalable vector database to store and search vectors with SQL.\n",
"\n",
"This notebook shows how to use functionality related to the `ClickHouse` vector search."
]
},
{
"cell_type": "markdown",
"id": "43ead5d5-2c1f-4dce-a69a-cb00e4f9d6f0",
"metadata": {},
"source": [
"## Setting up environments"
]
},
{
"cell_type": "markdown",
"id": "b2c434bc",
"metadata": {},
"source": [
"Setting up a local clickhouse server with docker (optional)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "249a7751",
"metadata": {
"ExecuteTime": {
"end_time": "2023-06-03T08:43:43.035606Z",
"start_time": "2023-06-03T08:43:42.618531Z"
}
},
"outputs": [],
"source": [
"! docker run -d -p 8123:8123 -p9000:9000 --name langchain-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server:23.4.2.11"
]
},
{
"cell_type": "markdown",
"id": "7bd3c1c0",
"metadata": {},
"source": [
"Set up the clickhouse client driver"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9d614bf8",
"metadata": {},
"outputs": [],
"source": [
"!pip install clickhouse-connect"
]
},
{
"cell_type": "markdown",
"id": "15a1d477-9cdb-4d82-b019-96951ecb2b72",
"metadata": {},
"source": [
"We want to use OpenAIEmbeddings so we have to get the OpenAI API Key."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "91003ea5-0c8c-436c-a5de-aaeaeef2f458",
"metadata": {
"ExecuteTime": {
"end_time": "2023-06-03T08:49:35.383673Z",
"start_time": "2023-06-03T08:49:33.984547Z"
}
},
"outputs": [],
"source": [
"import os\n",
"import getpass\n",
"\n",
"if not os.environ.get('OPENAI_API_KEY'):\n",
"    os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:')"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "aac9563e",
"metadata": {
"ExecuteTime": {
"end_time": "2023-06-03T08:33:31.554934Z",
"start_time": "2023-06-03T08:33:31.549590Z"
},
"tags": []
},
"outputs": [],
"source": [
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain.vectorstores import Clickhouse, ClickhouseSettings"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "a3c3999a",
"metadata": {
"ExecuteTime": {
"end_time": "2023-06-03T08:33:32.527387Z",
"start_time": "2023-06-03T08:33:32.501312Z"
},
"tags": []
},
"outputs": [],
"source": [
"from langchain.document_loaders import TextLoader\n",
"loader = TextLoader('../../../state_of_the_union.txt')\n",
"documents = loader.load()\n",
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
"docs = text_splitter.split_documents(documents)\n",
"\n",
"embeddings = OpenAIEmbeddings()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "6e104aee",
"metadata": {
"ExecuteTime": {
"end_time": "2023-06-03T08:33:35.503823Z",
"start_time": "2023-06-03T08:33:33.745832Z"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Inserting data...: 100%|██████████| 42/42 [00:00<00:00, 2801.49it/s]\n"
]
}
],
"source": [
"for d in docs:\n",
"    d.metadata = {'some': 'metadata'}\n",
"settings = ClickhouseSettings(table=\"clickhouse_vector_search_example\")\n",
"docsearch = Clickhouse.from_documents(docs, embeddings, config=settings)\n",
"\n",
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
"docs = docsearch.similarity_search(query)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "9c608226",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
"\n",
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
"\n",
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
"\n",
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n"
]
}
],
"source": [
"print(docs[0].page_content)"
]
},
{
"cell_type": "markdown",
"id": "e3a8b105",
"metadata": {},
"source": [
"## Get connection info and data schema"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "69996818",
"metadata": {
"ExecuteTime": {
"end_time": "2023-06-03T08:28:58.252991Z",
"start_time": "2023-06-03T08:28:58.197560Z"
},
"scrolled": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[92m\u001b[1mdefault.clickhouse_vector_search_example @ localhost:8123\u001b[0m\n",
"\n",
"\u001b[1musername: None\u001b[0m\n",
"\n",
"Table Schema:\n",
"---------------------------------------------------\n",
"|\u001b[94mid \u001b[0m|\u001b[96mNullable(String) \u001b[0m|\n",
"|\u001b[94mdocument \u001b[0m|\u001b[96mNullable(String) \u001b[0m|\n",
"|\u001b[94membedding \u001b[0m|\u001b[96mArray(Float32) \u001b[0m|\n",
"|\u001b[94mmetadata \u001b[0m|\u001b[96mObject('json') \u001b[0m|\n",
"|\u001b[94muuid \u001b[0m|\u001b[96mUUID \u001b[0m|\n",
"---------------------------------------------------\n",
"\n"
]
}
],
"source": [
"print(str(docsearch))"
]
},
{
"cell_type": "markdown",
"id": "324ac147",
"metadata": {},
"source": [
"### Clickhouse table schema"
]
},
{
"cell_type": "markdown",
"id": "b5bd7c5b",
"metadata": {},
"source": [
"> The Clickhouse table will be created automatically if it does not exist. Advanced users can pre-create the table with optimized settings. For a distributed Clickhouse cluster with sharding, the table engine should be configured as `Distributed`."
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "54f4f561",
"metadata": {
"scrolled": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clickhouse Table DDL:\n",
"\n",
"CREATE TABLE IF NOT EXISTS default.clickhouse_vector_search_example(\n",
"    id Nullable(String),\n",
"    document Nullable(String),\n",
"    embedding Array(Float32),\n",
"    metadata JSON,\n",
"    uuid UUID DEFAULT generateUUIDv4(),\n",
"    CONSTRAINT cons_vec_len CHECK length(embedding) = 1536,\n",
"    INDEX vec_idx embedding TYPE annoy(100,'L2Distance') GRANULARITY 1000\n",
") ENGINE = MergeTree ORDER BY uuid SETTINGS index_granularity = 8192\n"
]
}
],
"source": [
"print(f\"Clickhouse Table DDL:\\n\\n{docsearch.schema}\")"
]
},
{
"cell_type": "markdown",
"id": "f59360c0",
"metadata": {},
"source": [
"## Filtering\n",
"\n",
"You have direct access to the ClickHouse SQL `WHERE` statement and can write `WHERE` clauses following standard SQL.\n",
"\n",
"**NOTE**: Please be aware of SQL injection: this interface must not be called directly by the end user.\n",
"\n",
"If you customized your `column_map` in your setting, you can search with a filter like this:"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "232055f6",
"metadata": {
"ExecuteTime": {
"end_time": "2023-06-03T08:29:36.680805Z",
"start_time": "2023-06-03T08:29:34.963676Z"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Inserting data...: 100%|██████████| 42/42 [00:00<00:00, 6939.56it/s]\n"
]
}
],
"source": [
"from langchain.vectorstores import Clickhouse, ClickhouseSettings\n",
"from langchain.document_loaders import TextLoader\n",
"\n",
"loader = TextLoader('../../../state_of_the_union.txt')\n",
"documents = loader.load()\n",
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
"docs = text_splitter.split_documents(documents)\n",
"\n",
"embeddings = OpenAIEmbeddings()\n",
"\n",
"for i, d in enumerate(docs):\n",
"    d.metadata = {'doc_id': i}\n",
"\n",
"docsearch = Clickhouse.from_documents(docs, embeddings)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "ddbcee77",
"metadata": {
"ExecuteTime": {
"end_time": "2023-06-03T08:29:43.487436Z",
"start_time": "2023-06-03T08:29:43.040831Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"0.6779101415357189 {'doc_id': 0} Madam Speaker, Madam...\n",
"0.6997970363474885 {'doc_id': 8} And so many families...\n",
"0.7044504914336727 {'doc_id': 1} Groups of citizens b...\n",
"0.7053558702165094 {'doc_id': 6} And I’m taking robus...\n"
]
}
],
"source": [
"meta = docsearch.metadata_column\n",
"output = docsearch.similarity_search_with_relevance_scores('What did the president say about Ketanji Brown Jackson?', \n",
"    k=4, where_str=f\"{meta}.doc_id<10\")\n",
"for d, dist in output:\n",
"    print(dist, d.metadata, d.page_content[:20] + '...')"
]
},
{
"cell_type": "markdown",
"id": "a359ed74",
"metadata": {},
"source": [
"## Deleting your data"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "fb6a9d36",
"metadata": {
"ExecuteTime": {
"end_time": "2023-06-03T08:30:24.822384Z",
"start_time": "2023-06-03T08:30:24.798571Z"
}
},
"outputs": [],
"source": [
"docsearch.drop()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.2"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
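Since `where_str` in the new notebook is interpolated into raw SQL, here is a hedged sketch of keeping the filter safe; the `max_doc_id` value is illustrative:

```python
# build the filter only from trusted, coerced values to avoid SQL injection
max_doc_id = 10
meta = docsearch.metadata_column
hits = docsearch.similarity_search_with_relevance_scores(
    'What did the president say about Ketanji Brown Jackson?',
    k=4,
    where_str=f"{meta}.doc_id < {int(max_doc_id)}",  # int() guards the interpolation
)
for doc, score in hits:
    print(score, doc.metadata, doc.page_content[:20] + '...')
```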

@@ -14,22 +14,12 @@
"This notebook shows how to use functionality related to the `Elasticsearch` database."
]
},
{
"cell_type": "markdown",
"source": [
"# ElasticVectorSearch class"
],
"metadata": {
"id": "tKSYjyTBtSLc"
},
"id": "tKSYjyTBtSLc"
},
{
"cell_type": "markdown",
"id": "b66c12b2-2a07-4136-ac77-ce1c9fa7a409",
"metadata": {
"tags": [],
"id": "b66c12b2-2a07-4136-ac77-ce1c9fa7a409"
"id": "b66c12b2-2a07-4136-ac77-ce1c9fa7a409",
"tags": []
},
"source": [
"## Installation"
@@ -104,8 +94,8 @@
"execution_count": null,
"id": "d6197931-cbe5-460c-a5e6-b5eedb83887c",
"metadata": {
"tags": [],
"id": "d6197931-cbe5-460c-a5e6-b5eedb83887c"
"id": "d6197931-cbe5-460c-a5e6-b5eedb83887c",
"tags": []
},
"outputs": [],
"source": [
@@ -117,9 +107,9 @@
"execution_count": null,
"id": "67ab8afa-f7c6-4fbf-b596-cb512da949da",
"metadata": {
"tags": [],
"id": "67ab8afa-f7c6-4fbf-b596-cb512da949da",
"outputId": "fd16b37f-cb76-40a9-b83f-eab58dd0d912"
"outputId": "fd16b37f-cb76-40a9-b83f-eab58dd0d912",
"tags": []
},
"outputs": [
{
@@ -141,8 +131,8 @@
"cell_type": "markdown",
"id": "f6030187-0bd7-4798-8372-a265036af5e0",
"metadata": {
"tags": [],
"id": "f6030187-0bd7-4798-8372-a265036af5e0"
"id": "f6030187-0bd7-4798-8372-a265036af5e0",
"tags": []
},
"source": [
"## Example"
@@ -153,8 +143,8 @@
"execution_count": null,
"id": "aac9563e",
"metadata": {
"tags": [],
"id": "aac9563e"
"id": "aac9563e",
"tags": []
},
"outputs": [],
"source": [
@@ -169,8 +159,8 @@
"execution_count": null,
"id": "a3c3999a",
"metadata": {
"tags": [],
"id": "a3c3999a"
"id": "a3c3999a",
"tags": []
},
"outputs": [],
"source": [
@@ -189,8 +179,8 @@
"execution_count": null,
"id": "12eb86d8",
"metadata": {
"tags": [],
"id": "12eb86d8"
"id": "12eb86d8",
"tags": []
},
"outputs": [],
"source": [
@@ -235,43 +225,49 @@
},
{
"cell_type": "markdown",
"source": [
"# ElasticKnnSearch Class\n",
"The `ElasticKnnSearch` class implements features for storing vectors and documents in Elasticsearch, for use with approximate [kNN search](https://www.elastic.co/guide/en/elasticsearch/reference/current/knn-search.html)"
],
"id": "FheGPztJsrRB",
"metadata": {
"id": "FheGPztJsrRB"
},
"id": "FheGPztJsrRB"
"source": [
"# ElasticKnnSearch Class\n",
"The `ElasticKnnSearch` class implements features for storing vectors and documents in Elasticsearch, for use with approximate [kNN search](https://www.elastic.co/guide/en/elasticsearch/reference/current/knn-search.html)"
]
},
{
"cell_type": "code",
"source": [
"!pip install langchain elasticsearch"
],
"execution_count": null,
"id": "gRVcbh5zqCJQ",
"metadata": {
"id": "gRVcbh5zqCJQ"
},
"execution_count": null,
"outputs": [],
"id": "gRVcbh5zqCJQ"
"source": [
"!pip install langchain elasticsearch"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "TJtqiw5AqBp8",
"metadata": {
"id": "TJtqiw5AqBp8"
},
"outputs": [],
"source": [
"from langchain.vectorstores.elastic_vector_search import ElasticKnnSearch\n",
"from langchain.embeddings import ElasticsearchEmbeddings\n",
"import elasticsearch"
],
"metadata": {
"id": "TJtqiw5AqBp8"
},
"execution_count": null,
"outputs": [],
"id": "TJtqiw5AqBp8"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "XHfC0As6qN3T",
"metadata": {
"id": "XHfC0As6qN3T"
},
"outputs": [],
"source": [
"# Initialize ElasticsearchEmbeddings\n",
"model_id = \"<model_id_from_es>\"\n",
@@ -281,16 +277,16 @@
"es_password = \"es_pass\"\n",
"test_index = \"<index_name>\"\n",
"# input_field = \"your_input_field\" # if different from 'text_field'"
],
"metadata": {
"id": "XHfC0As6qN3T"
},
"execution_count": null,
"outputs": [],
"id": "XHfC0As6qN3T"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "UkTipx1lqc3h",
"metadata": {
"id": "UkTipx1lqc3h"
},
"outputs": [],
"source": [
"# Generate embedding object\n",
"embeddings = ElasticsearchEmbeddings.from_credentials(\n",
@@ -300,16 +296,16 @@
"    es_user=es_user,\n",
"    es_password=es_password,\n",
")"
],
"metadata": {
"id": "UkTipx1lqc3h"
},
"execution_count": null,
"outputs": [],
"id": "UkTipx1lqc3h"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "74psgD0oqjYK",
"metadata": {
"id": "74psgD0oqjYK"
},
"outputs": [],
"source": [
"# Initialize ElasticKnnSearch\n",
"knn_search = ElasticKnnSearch(\n",
@@ -319,26 +315,26 @@
"    index_name=test_index,\n",
"    embedding=embeddings,\n",
")"
],
"metadata": {
"id": "74psgD0oqjYK"
},
"execution_count": null,
"outputs": [],
"id": "74psgD0oqjYK"
]
},
{
"cell_type": "markdown",
"source": [
"## Test adding vectors"
],
"id": "7AfgIKLWqnQl",
"metadata": {
"id": "7AfgIKLWqnQl"
},
"id": "7AfgIKLWqnQl"
"source": [
"## Test adding vectors"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "yNUUIaL9qmze",
"metadata": {
"id": "yNUUIaL9qmze"
},
"outputs": [],
"source": [
"# Test `add_texts` method\n",
"texts = [\"Hello, world!\", \"Machine learning is fun.\", \"I love Python.\"]\n",
@@ -351,26 +347,26 @@
"    \"Python is great for data analysis.\",\n",
"]\n",
"knn_search.from_texts(new_texts, dims=dims)"
],
"metadata": {
"id": "yNUUIaL9qmze"
},
"execution_count": null,
"outputs": [],
"id": "yNUUIaL9qmze"
]
},
{
"cell_type": "markdown",
"source": [
"## Test knn search using query vector builder "
],
"id": "0zdR-Iubquov",
"metadata": {
"id": "0zdR-Iubquov"
},
"id": "0zdR-Iubquov"
"source": [
"## Test knn search using query vector builder "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bwR4jYvqqxTo",
"metadata": {
"id": "bwR4jYvqqxTo"
},
"outputs": [],
"source": [
"# Test `knn_search` method with model_id and query_text\n",
"query = \"Hello\"\n",
@@ -387,26 +383,26 @@
"print(\n",
"    f\"The 'text' field value from the top hit is: '{hybrid_result['hits']['hits'][0]['_source']['text']}'\"\n",
")"
],
"metadata": {
"id": "bwR4jYvqqxTo"
},
"execution_count": null,
"outputs": [],
"id": "bwR4jYvqqxTo"
]
},
{
"cell_type": "markdown",
"source": [
"## Test knn search using pre generated vector \n"
],
"id": "ltXYqp0qqz7R",
"metadata": {
"id": "ltXYqp0qqz7R"
},
"id": "ltXYqp0qqz7R"
"source": [
"## Test knn search using pre generated vector \n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "O5COtpTqq23t",
"metadata": {
"id": "O5COtpTqq23t"
},
"outputs": [],
"source": [
"# Generate embedding for tests\n",
"query_text = \"Hello\"\n",
@@ -428,26 +424,26 @@
"print(\n",
"    f\"The 'text' field value from the top hit is: '{knn_result['hits']['hits'][0]['_source']['text']}'\"\n",
")"
],
"metadata": {
"id": "O5COtpTqq23t"
},
"execution_count": null,
"outputs": [],
"id": "O5COtpTqq23t"
]
},
{
"cell_type": "markdown",
"source": [
"## Test source option"
],
"id": "0dnmimcJq42C",
"metadata": {
"id": "0dnmimcJq42C"
},
"id": "0dnmimcJq42C"
"source": [
"## Test source option"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "v4_B72nHq7g1",
"metadata": {
"id": "v4_B72nHq7g1"
},
"outputs": [],
"source": [
"# Test `knn_search` method with model_id and query_text\n",
"query = \"Hello\"\n",
@@ -460,26 +456,26 @@
"    query=query, model_id=model_id, k=2, source=False\n",
")\n",
"assert not \"_source\" in hybrid_result[\"hits\"][\"hits\"][0].keys()"
],
"metadata": {
"id": "v4_B72nHq7g1"
},
"execution_count": null,
"outputs": [],
"id": "v4_B72nHq7g1"
]
},
{
"cell_type": "markdown",
"source": [
"## Test fields option "
],
"id": "teHgJgrlq-Jb",
"metadata": {
"id": "teHgJgrlq-Jb"
},
"id": "teHgJgrlq-Jb"
"source": [
"## Test fields option "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "utNBbpZYrAYW",
"metadata": {
"id": "utNBbpZYrAYW"
},
"outputs": [],
"source": [
"# Test `knn_search` method with model_id and query_text\n",
"query = \"Hello\"\n",
@@ -492,72 +488,72 @@
"    query=query, model_id=model_id, k=2, fields=[\"text\"]\n",
")\n",
"assert \"text\" in hybrid_result[\"hits\"][\"hits\"][0][\"fields\"].keys()"
],
"metadata": {
"id": "utNBbpZYrAYW"
},
"execution_count": null,
"outputs": [],
"id": "utNBbpZYrAYW"
]
},
{
"cell_type": "markdown",
"source": [
"### Test with es client connection rather than cloud_id "
],
"id": "hddsIFferBy1",
"metadata": {
"id": "hddsIFferBy1"
},
"id": "hddsIFferBy1"
"source": [
"### Test with es client connection rather than cloud_id "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bXqrUnoirFia",
"metadata": {
"id": "bXqrUnoirFia"
},
"outputs": [],
"source": [
"# Create Elasticsearch connection\n",
"es_connection = Elasticsearch(\n",
"    hosts=[\"https://es_cluster_url:port\"], basic_auth=(\"user\", \"password\")\n",
")"
],
"metadata": {
"id": "bXqrUnoirFia"
},
"execution_count": null,
"outputs": [],
"id": "bXqrUnoirFia"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "TIM__Hm8rSEW",
"metadata": {
"id": "TIM__Hm8rSEW"
},
"outputs": [],
"source": [
"# Instantiate ElasticsearchEmbeddings using es_connection\n",
"embeddings = ElasticsearchEmbeddings.from_es_connection(\n",
"    model_id,\n",
"    es_connection,\n",
")"
],
"metadata": {
"id": "TIM__Hm8rSEW"
},
"execution_count": null,
"outputs": [],
"id": "TIM__Hm8rSEW"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1-CdnOrArVc_",
"metadata": {
"id": "1-CdnOrArVc_"
},
"outputs": [],
"source": [
"# Initialize ElasticKnnSearch\n",
"knn_search = ElasticKnnSearch(\n",
"    es_connection=es_connection, index_name=test_index, embedding=embeddings\n",
")"
],
"metadata": {
"id": "1-CdnOrArVc_"
},
"execution_count": null,
"outputs": [],
"id": "1-CdnOrArVc_"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0kgyaL6QrYVF",
"metadata": {
"id": "0kgyaL6QrYVF"
},
"outputs": [],
"source": [
"# Test `knn_search` method with model_id and query_text\n",
"query = \"Hello\"\n",
@@ -566,16 +562,13 @@
"print(\n",
"    f\"The 'text' field value from the top hit is: '{knn_result['hits']['hits'][0]['_source']['text']}'\"\n",
")"
],
"metadata": {
"id": "0kgyaL6QrYVF"
},
"execution_count": null,
"outputs": [],
"id": "0kgyaL6QrYVF"
]
}
],
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
@@ -592,11 +585,8 @@
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.6"
},
"colab": {
"provenance": []
}
},
"nbformat": 4,
"nbformat_minor": 5
}
}
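The hunks above mostly reshuffle cell metadata. For orientation, a hedged end-to-end sketch of the `ElasticKnnSearch` flow they document; the cloud id, credentials, model id, and index name are placeholders:

```python
from langchain.vectorstores.elastic_vector_search import ElasticKnnSearch
from langchain.embeddings import ElasticsearchEmbeddings

model_id = "<model_id_from_es>"  # placeholder: an embedding model deployed in Elasticsearch
embeddings = ElasticsearchEmbeddings.from_credentials(
    model_id, es_cloud_id="<cloud_id>", es_user="es_user", es_password="es_pass"
)
knn_search = ElasticKnnSearch(
    es_cloud_id="<cloud_id>", es_user="es_user", es_password="es_pass",
    index_name="<index_name>", embedding=embeddings,
)

# index a few texts, then run the approximate kNN query the cells test
knn_search.add_texts(["Hello, world!", "Machine learning is fun."])
knn_result = knn_search.knn_search(query="Hello", model_id=model_id, k=2)
print(knn_result["hits"]["hits"][0]["_source"]["text"])
```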

@@ -16,6 +16,15 @@
"Click [here](https://www.alibabacloud.com/zh/product/hologres) to quickly deploy a Hologres cloud instance."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"#!pip install psycopg2"
]
},
{
"cell_type": "code",
"execution_count": 1,
@@ -149,7 +158,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.16"
"version": "3.10.6"
}
},
"nbformat": 4,

@@ -0,0 +1,222 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "683953b3",
"metadata": {},
"source": [
"# MongoDB Atlas\n",
"\n",
">[MongoDB Atlas](https://www.mongodb.com/docs/atlas/) is a fully-managed cloud database available in AWS, Azure, and GCP. It now has support for native Vector Search on your MongoDB document data.\n",
"\n",
"This notebook shows how to use `MongoDB Atlas Vector Search` to store your embeddings in MongoDB documents, create a vector search index, and perform KNN search with an approximate nearest neighbor algorithm.\n",
"\n",
"It uses the [knnBeta Operator](https://www.mongodb.com/docs/atlas/atlas-search/knn-beta) available in MongoDB Atlas Search. This feature is in Public Preview and available for evaluation purposes, to validate functionality, and to gather feedback from public preview users. It is not recommended for production deployments as we may introduce breaking changes.\n",
"\n",
"To use MongoDB Atlas, you must first deploy a cluster. We have a Forever-Free tier of clusters available. \n",
"To get started head over to Atlas here: [quick start](https://www.mongodb.com/docs/atlas/getting-started/)."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b4c41cad-08ef-4f72-a545-2151e4598efe",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"!pip install pymongo"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c1e38361-c1fe-4ac6-86e9-c90ebaf7ae87",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import getpass\n",
"\n",
"MONGODB_ATLAS_CLUSTER_URI = getpass.getpass(\"MongoDB Atlas Cluster URI:\")\n",
"MONGODB_ATLAS_CLUSTER_URI = os.environ[\"MONGODB_ATLAS_CLUSTER_URI\"]"
]
},
{
"cell_type": "markdown",
"id": "457ace44-1d95-4001-9dd5-78811ab208ad",
"metadata": {},
"source": [
"We want to use `OpenAIEmbeddings` so we need to set up our OpenAI API Key. "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2d8f240d",
"metadata": {},
"outputs": [],
"source": [
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n",
"OPENAI_API_KEY = os.environ[\"OPENAI_API_KEY\"]"
]
},
{
"cell_type": "markdown",
"id": "1f3ecc42",
"metadata": {},
"source": [
"Now, let's create a vector search index on your cluster. In the below example, `embedding` is the name of the field that contains the embedding vector. Please refer to the [documentation](https://www.mongodb.com/docs/atlas/atlas-search/define-field-mappings-for-vector-search) to get more details on how to define an Atlas Vector Search index.\n",
"You can name the index `langchain_demo` and create the index on the namespace `lanchain_db.langchain_col`. Finally, write the following definition in the JSON editor on MongoDB Atlas:\n",
"\n",
"```json\n",
"{\n",
"  \"mappings\": {\n",
"    \"dynamic\": true,\n",
"    \"fields\": {\n",
"      \"embedding\": {\n",
"        \"dimensions\": 1536,\n",
"        \"similarity\": \"cosine\",\n",
"        \"type\": \"knnVector\"\n",
"      }\n",
"    }\n",
"  }\n",
"}\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "aac9563e",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain.vectorstores import MongoDBAtlasVectorSearch\n",
"from langchain.document_loaders import TextLoader"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "a3c3999a",
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import TextLoader\n",
"\n",
"loader = TextLoader(\"../../../state_of_the_union.txt\")\n",
"documents = loader.load()\n",
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
"docs = text_splitter.split_documents(documents)\n",
"\n",
"embeddings = OpenAIEmbeddings()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6e104aee",
"metadata": {},
"outputs": [],
"source": [
"from pymongo import MongoClient\n",
"\n",
"# initialize MongoDB python client\n",
"client = MongoClient(MONGODB_ATLAS_CLUSTER_URI)\n",
"\n",
"db_name = \"lanchain_db\"\n",
"collection_name = \"langchain_col\"\n",
"collection = client[db_name][collection_name]\n",
"index_name = \"langchain_demo\"\n",
"\n",
"# insert the documents in MongoDB Atlas with their embedding\n",
"docsearch = MongoDBAtlasVectorSearch.from_documents(\n",
"    docs, embeddings, collection=collection, index_name=index_name\n",
")\n",
"\n",
"# perform a similarity search between the embedding of the query and the embeddings of the documents\n",
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
"docs = docsearch.similarity_search(query)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9c608226",
"metadata": {},
"outputs": [],
"source": [
"print(docs[0].page_content)"
]
},
{
"cell_type": "markdown",
"id": "851a2ec9-9390-49a4-8412-3e132c9f789d",
"metadata": {},
"source": [
"You can reuse the vector search index you created, make sure the `OPENAI_API_KEY` environment variable is set up, then execute another query."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6336fe79-3e73-48be-b20a-0ff1bb6a4399",
"metadata": {},
"outputs": [],
"source": [
"from pymongo import MongoClient\n",
"from langchain.vectorstores import MongoDBAtlasVectorSearch\n",
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
"import os\n",
"\n",
"MONGODB_ATLAS_URI = os.environ[\"MONGODB_ATLAS_URI\"]\n",
"\n",
"# initialize MongoDB python client\n",
"client = MongoClient(MONGODB_ATLAS_URI)\n",
"\n",
"db_name = \"langchain_db\"\n",
"collection_name = \"langchain_col\"\n",
"collection = client[db_name][collection_name]\n",
"index_name = \"langchain_index\"\n",
"\n",
"# initialize vector store\n",
"vectorStore = MongoDBAtlasVectorSearch(\n",
"    collection, OpenAIEmbeddings(), index_name=index_name\n",
")\n",
"\n",
"# perform a similarity search between the embedding of the query and the embeddings of the documents\n",
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
"docs = vectorStore.similarity_search(query)\n",
"\n",
"print(docs[0].page_content)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.6"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
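As a follow-up, a hedged sketch of wrapping the store from the notebook above in a retriever; the `k` value is illustrative, and `as_retriever` is the generic LangChain vector-store API rather than anything MongoDB-specific:

```python
# expose the Atlas vector store as a retriever for use in chains
retriever = vectorStore.as_retriever(search_kwargs={"k": 4})
docs = retriever.get_relevant_documents(
    "What did the president say about Ketanji Brown Jackson"
)
print(docs[0].page_content)
```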

@@ -7,14 +7,14 @@
"source": [
"# MongoDB Atlas Vector Search\n",
"\n",
">[MongoDB Atlas](https://www.mongodb.com/docs/atlas/) is a fully-managed cloud database available in AWS, Azure, and GCP. It now has support for native Vector Search on your MongoDB document data.\n",
">[MongoDB Atlas](https://www.mongodb.com/docs/atlas/) is a document database managed in the cloud. It also enables Lucene and its vector search feature.\n",
"\n",
"This notebook shows how to use `MongoDB Atlas Vector Search` to store your embeddings in MongoDB documents, create a vector search index, and perform KNN search with an approximate nearest neighbor algorithm.\n",
"This notebook shows how to use the functionality related to the `MongoDB Atlas Vector Search` feature where you can store your embeddings in MongoDB documents and create a Lucene vector index to perform a KNN search.\n",
"\n",
"It uses the [knnBeta Operator](https://www.mongodb.com/docs/atlas/atlas-search/knn-beta) available in MongoDB Atlas Search. This feature is in Public Preview and available for evaluation purposes, to validate functionality, and to gather feedback from public preview users. It is not recommended for production deployments as we may introduce breaking changes.\n",
"It uses the [knnBeta Operator](https://www.mongodb.com/docs/atlas/atlas-search/knn-beta) available in MongoDB Atlas Search. This feature is in early access and available only for evaluation purposes, to validate functionality, and to gather feedback from a small closed group of early access users. It is not recommended for production deployments as we may introduce breaking changes.\n",
"\n",
"To use MongoDB Atlas, you must first deploy a cluster. We have a Forever-Free tier of clusters available. \n",
"To get started head over to Atlas here: [quick start](https://www.mongodb.com/docs/atlas/getting-started/)."
"To use MongoDB Atlas, you must have first deployed a cluster. Free clusters are available. \n",
"Here is the MongoDB Atlas [quick start](https://www.mongodb.com/docs/atlas/getting-started/)."
]
},
{
@@ -37,29 +37,16 @@
"outputs": [],
"source": [
"import os\n",
"import getpass\n",
"\n",
"MONGODB_ATLAS_CLUSTER_URI = getpass.getpass(\"MongoDB Atlas Cluster URI:\")\n",
"MONGODB_ATLAS_CLUSTER_URI = os.environ[\"MONGODB_ATLAS_CLUSTER_URI\"]"
"MONGODB_ATLAS_URI = os.environ['MONGODB_ATLAS_URI']"
]
},
{
"cell_type": "markdown",
"id": "457ace44-1d95-4001-9dd5-78811ab208ad",
"id": "320af802-9271-46ee-948f-d2453933d44b",
"metadata": {},
"source": [
"We want to use `OpenAIEmbeddings` so we need to set up our OpenAI API Key. "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2d8f240d",
"metadata": {},
"outputs": [],
"source": [
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n",
"OPENAI_API_KEY = os.environ[\"OPENAI_API_KEY\"]"
"We want to use `OpenAIEmbeddings` so we have to get the OpenAI API Key. Make sure the environment variable `OPENAI_API_KEY` is set up before proceeding."
]
},
{
@@ -67,8 +54,8 @@
"id": "1f3ecc42",
"metadata": {},
"source": [
"Now, let's create a vector search index on your cluster. In the below example, `embedding` is the name of the field that contains the embedding vector. Please refer to the [documentation](https://www.mongodb.com/docs/atlas/atlas-search/define-field-mappings-for-vector-search) to get more details on how to define an Atlas Vector Search index.\n",
"You can name the index `langchain_demo` and create the index on the namespace `lanchain_db.langchain_col`. Finally, write the following definition in the JSON editor on MongoDB Atlas:\n",
"Now, let's create a Lucene vector index on your cluster. In the below example, `embedding` is the name of the field that contains the embedding vector. Please refer to the [documentation](https://www.mongodb.com/docs/atlas/atlas-search/define-field-mappings-for-vector-search) to get more details on how to define an Atlas Search index.\n",
"You can name the index `langchain_demo` and create the index on the namespace `lanchain_db.langchain_col`. Finally, write the following definition in the JSON editor:\n",
"\n",
"```json\n",
"{\n",
@@ -109,8 +96,7 @@
"outputs": [],
"source": [
"from langchain.document_loaders import TextLoader\n",
"\n",
"loader = TextLoader(\"../../../state_of_the_union.txt\")\n",
"loader = TextLoader('../../../state_of_the_union.txt')\n",
"documents = loader.load()\n",
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
"docs = text_splitter.split_documents(documents)\n",
@@ -128,7 +114,7 @@
"from pymongo import MongoClient\n",
"\n",
"# initialize MongoDB python client\n",
"client = MongoClient(MONGODB_ATLAS_CLUSTER_URI)\n",
"client = MongoClient(MONGODB_ATLAS_CONNECTION_STRING)\n",
"\n",
"db_name = \"lanchain_db\"\n",
"collection_name = \"langchain_col\"\n",
@@ -137,7 +123,10 @@
"\n",
"# insert the documents in MongoDB Atlas with their embedding\n",
"docsearch = MongoDBAtlasVectorSearch.from_documents(\n",
"    docs, embeddings, collection=collection, index_name=index_name\n",
"    docs,\n",
"    embeddings,\n",
"    collection=collection,\n",
"    index_name=index_name\n",
")\n",
"\n",
"# perform a similarity search between the embedding of the query and the embeddings of the documents\n",
@@ -154,48 +143,6 @@
"source": [
"print(docs[0].page_content)"
]
},
{
"cell_type": "markdown",
"id": "851a2ec9-9390-49a4-8412-3e132c9f789d",
"metadata": {},
"source": [
"You can reuse the vector search index you created, make sure the `OPENAI_API_KEY` environment variable is set up, then execute another query."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6336fe79-3e73-48be-b20a-0ff1bb6a4399",
"metadata": {},
"outputs": [],
"source": [
"from pymongo import MongoClient\n",
"from langchain.vectorstores import MongoDBAtlasVectorSearch\n",
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
"import os\n",
"\n",
"MONGODB_ATLAS_URI = os.environ[\"MONGODB_ATLAS_URI\"]\n",
"\n",
"# initialize MongoDB python client\n",
"client = MongoClient(MONGODB_ATLAS_URI)\n",
"\n",
"db_name = \"langchain_db\"\n",
"collection_name = \"langchain_col\"\n",
"collection = client[db_name][collection_name]\n",
"index_name = \"langchain_index\"\n",
"\n",
"# initialize vector store\n",
"vectorStore = MongoDBAtlasVectorSearch(\n",
"    collection, OpenAIEmbeddings(), index_name=index_name\n",
")\n",
"\n",
"# perform a similarity search between the embedding of the query and the embeddings of the documents\n",
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
"docs = vectorStore.similarity_search(query)\n",
"\n",
"print(docs[0].page_content)"
]
}
],
"metadata": {
@@ -214,7 +161,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.11.3"
}
},
"nbformat": 4,

@@ -96,7 +96,7 @@
"id": "01a9a035",
"metadata": {},
"source": [
"### similarity_search using Approximate k-NN\n",
"## similarity_search using Approximate k-NN\n",
"\n",
"`similarity_search` using `Approximate k-NN` Search with Custom Parameters"
]
@@ -182,7 +182,7 @@
"id": "0d0cd877",
"metadata": {},
"source": [
"### similarity_search using Script Scoring\n",
"## similarity_search using Script Scoring\n",
"\n",
"`similarity_search` using `Script Scoring` with Custom Parameters"
]
@@ -221,7 +221,7 @@
"id": "a4af96cc",
"metadata": {},
"source": [
"### similarity_search using Painless Scripting\n",
"## similarity_search using Painless Scripting\n",
"\n",
"`similarity_search` using `Painless Scripting` with Custom Parameters"
]
@@ -258,32 +258,35 @@
},
{
"cell_type": "markdown",
"id": "4f8fb0d0",
"metadata": {},
"source": [
"### Maximum marginal relevance search (MMR)\n",
"## Maximum marginal relevance search (MMR)\n",
"If you’d like to look up some similar documents, but you’d also like to receive diverse results, MMR is a method you should consider. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents."
],
"metadata": {
"collapsed": false
}
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ba85e092",
"metadata": {
"collapsed": false,
"jupyter": {
"outputs_hidden": false
}
},
"outputs": [],
"source": [
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
"docs = docsearch.max_marginal_relevance_search(query, k=2, fetch_k=10, lambda_param=0.5)"
],
"metadata": {
"collapsed": false
}
]
},
{
"cell_type": "markdown",
"id": "73264864",
"metadata": {},
"source": [
"### Using a preexisting OpenSearch instance\n",
"## Using a preexisting OpenSearch instance\n",
"\n",
"It's also possible to use a preexisting OpenSearch instance with documents that already have vectors present."
]
@@ -330,7 +333,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.3"
"version": "3.10.6"
}
},
"nbformat": 4,
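For reference, the standard maximal-marginal-relevance criterion the MMR cell above implements (a textbook formulation, not code from the diff), where q is the query, C the candidate set, S the already-selected documents, and λ corresponds to `lambda_param`:

```latex
\mathrm{MMR} \;=\; \arg\max_{d_i \in C \setminus S}
  \Bigl[\, \lambda \,\mathrm{sim}(d_i, q) \;-\; (1-\lambda)\,\max_{d_j \in S} \mathrm{sim}(d_i, d_j) \,\Bigr]
```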

@@ -201,14 +201,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"## Similarity search with score"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Similarity Search with Euclidean Distance (Default)"
"## Similarity Search with Euclidean Distance (Default)"
]
},
{
@@ -303,14 +296,14 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"## Working with vectorstore in PG"
"## Working with vectorstore"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Uploading a vectorstore in PG "
"### Uploading a vectorstore"
]
},
{
@@ -336,7 +329,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Retrieving a vectorstore in PG"
"### Retrieving a vectorstore"
]
},
{
@@ -498,7 +491,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.7"
"version": "3.10.6"
}
},
"nbformat": 4,

@@ -626,6 +626,44 @@
"source": [
"## Customizing Qdrant\n",
"\n",
"There are some options to use an existing Qdrant collection within your Langchain application. In such cases you may need to define how to map a Qdrant point onto the Langchain `Document`.\n",
"\n",
"### Named vectors\n",
"\n",
"Qdrant supports [multiple vectors per point](https://qdrant.tech/documentation/concepts/collections/#collection-with-multiple-vectors) by named vectors. Langchain requires just a single embedding per document and, by default, uses a single vector. However, if you work with a collection created externally or want to have the named vector used, you can configure it by providing its name.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"outputs": [],
"source": [
"Qdrant.from_documents(\n",
"    docs,\n",
"    embeddings,\n",
"    location=\":memory:\",\n",
"    collection_name=\"my_documents_2\",\n",
"    vector_name=\"custom_vector\",\n",
")"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "markdown",
"source": [
"As a Langchain user, you won't see any difference whether you use named vectors or not. Qdrant integration will handle the conversion under the hood."
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "markdown",
"source": [
"### Metadata\n",
"\n",
"Qdrant stores your vector embeddings along with the optional JSON-like payload. Payloads are optional, but since LangChain assumes the embeddings are generated from the documents, we keep the context data, so you can extract the original texts as well.\n",
"\n",
"By default, your document is going to be stored in the following payload structure:\n",
@@ -639,8 +677,11 @@
"}\n",
"```\n",
"\n",
"You can, however, decide to use different keys for the page content and metadata. That's useful if you already have a collection that you'd like to reuse. You can always change the "
]
"You can, however, decide to use different keys for the page content and metadata. That's useful if you already have a collection that you'd like to reuse."
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
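To round out the metadata section above, a hedged sketch of the custom payload keys it describes; the key names are illustrative, and the parameters follow the integration's `content_payload_key`/`metadata_payload_key` options:

```python
from langchain.vectorstores import Qdrant

# reuse an existing payload layout by telling the wrapper where the
# page content and metadata live inside each Qdrant point
qdrant = Qdrant.from_documents(
    docs,
    embeddings,
    location=":memory:",
    collection_name="my_documents_3",
    content_payload_key="my_page_content_key",
    metadata_payload_key="my_meta",
)
```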

@@ -1,20 +1,18 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"id": "20b588b4",
"metadata": {},
"source": [
"# Rockset Vector Search\n",
"# Rockset\n",
"\n",
"[Rockset](https://rockset.com/product/) is a real-time analytics database service for serving low latency, high concurrency analytical queries at scale. It builds a Converged Index™ on structured and semi-structured data with an efficient store for vector embeddings. Its support for running SQL on schemaless data makes it a perfect choice for running vector search with metadata filters. \n",
">[Rockset](https://rockset.com/product/) is a real-time analytics database service for serving low latency, high concurrency analytical queries at scale. It builds a Converged Index™ on structured and semi-structured data with an efficient store for vector embeddings. Its support for running SQL on schemaless data makes it a perfect choice for running vector search with metadata filters. \n",
"\n",
"This notebook demonstrates how to use Rockset as a vectorstore in langchain. To get started, make sure you have a Rockset account and an API key available."
"This notebook demonstrates how to use `Rockset` as a vectorstore in langchain. To get started, make sure you have a `Rockset` account and an API key available."
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "e290ddc0",
"metadata": {},
@@ -25,7 +23,6 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "7d77bbbe",
"metadata": {},
@@ -52,7 +49,6 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "7951c9cd",
"metadata": {},
@@ -71,7 +67,6 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "8600900d",
"metadata": {},
@@ -80,12 +75,11 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "3bf2f818",
"metadata": {},
"source": [
"## Using Rockset langchain vectorstore"
"## Example"
]
},
{
@@ -109,7 +103,6 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "474636a2",
"metadata": {},
@@ -138,7 +131,6 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "1404cada",
"metadata": {},
@@ -173,7 +165,6 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "f1290844",
"metadata": {},
@@ -205,7 +196,6 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "5e15d630",
"metadata": {},
@@ -243,7 +233,6 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "0765b822",
"metadata": {},
@@ -266,7 +255,6 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "03fa12a9",
"metadata": {},
@@ -277,6 +265,14 @@
"\n",
"Keep an eye on https://rockset.com/blog/introducing-vector-search-on-rockset/ for future updates in this space!"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2763dddb-e87d-4d3b-b0bf-c246b0573d87",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
@@ -295,7 +291,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.6"
"version": "3.10.6"
}
},
"nbformat": 4,
@@ -6,7 +6,9 @@
"metadata": {},
"source": [
"# SingleStoreDB\n",
"[SingleStoreDB](https://singlestore.com/) is a high-performance distributed SQL database that supports deployment both in the [cloud](https://www.singlestore.com/cloud/) and on-premises. It provides vector storage, and vector functions including [dot_product](https://docs.singlestore.com/managed-service/en/reference/sql-reference/vector-functions/dot_product.html) and [euclidean_distance](https://docs.singlestore.com/managed-service/en/reference/sql-reference/vector-functions/euclidean_distance.html), thereby supporting AI applications that require text similarity matching. This tutorial illustrates how to [work with vector data in SingleStoreDB](https://docs.singlestore.com/managed-service/en/developer-resources/functional-extensions/working-with-vector-data.html)."
">[SingleStoreDB](https://singlestore.com/) is a high-performance distributed SQL database that supports deployment both in the [cloud](https://www.singlestore.com/cloud/) and on-premises. It provides vector storage, and vector functions including [dot_product](https://docs.singlestore.com/managed-service/en/reference/sql-reference/vector-functions/dot_product.html) and [euclidean_distance](https://docs.singlestore.com/managed-service/en/reference/sql-reference/vector-functions/euclidean_distance.html), thereby supporting AI applications that require text similarity matching. \n",
"\n",
"This tutorial illustrates how to [work with vector data in SingleStoreDB](https://docs.singlestore.com/managed-service/en/developer-resources/functional-extensions/working-with-vector-data.html)."
]
},
{
@@ -129,7 +131,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.2"
"version": "3.10.6"
}
},
"nbformat": 4,
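As orientation for the passage above, a hedged sketch of the SingleStoreDB flow as I understand the integration; the connection URL and table name are placeholders, and `docs` comes from the usual text-splitting step:

```python
import os
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import SingleStoreDB

# the integration reads the connection string from this environment variable
os.environ["SINGLESTOREDB_URL"] = "root:pass@localhost:3306/db"  # placeholder

# load documents into a SingleStoreDB-backed vector table, then search
docsearch = SingleStoreDB.from_documents(docs, OpenAIEmbeddings(), table_name="notebook")
hits = docsearch.similarity_search("What did the president say about Ketanji Brown Jackson")
print(hits[0].page_content)
```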

@@ -1,13 +1,12 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# SKLearnVectorStore\n",
"# scikit-learn\n",
"\n",
"[scikit-learn](https://scikit-learn.org/stable/) is an open source collection of machine learning algorithms, including some implementations of the [k nearest neighbors](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.NearestNeighbors.html). `SKLearnVectorStore` wraps this implementation and adds the possibility to persist the vector store in json, bson (binary json) or Apache Parquet format.\n",
">[scikit-learn](https://scikit-learn.org/stable/) is an open source collection of machine learning algorithms, including some implementations of the [k nearest neighbors](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.NearestNeighbors.html). `SKLearnVectorStore` wraps this implementation and adds the possibility to persist the vector store in json, bson (binary json) or Apache Parquet format.\n",
"\n",
"This notebook shows how to use the `SKLearnVectorStore` vector database."
]
@@ -28,7 +27,6 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -48,7 +46,6 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -76,7 +73,6 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -120,7 +116,6 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -190,7 +185,6 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -209,7 +203,7 @@
],
"metadata": {
"kernelspec": {
"display_name": "sofia",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
@@ -223,10 +217,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.16"
},
"orig_nbformat": 4
"version": "3.10.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}
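To illustrate the persistence feature the intro above names, a hedged sketch; the path is a placeholder, and `persist_path`/`serializer` follow the `SKLearnVectorStore` constructor as I understand it:

```python
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import SKLearnVectorStore

# build a store over toy texts and persist it as JSON on disk
store = SKLearnVectorStore.from_texts(
    ["foo", "bar"],
    OpenAIEmbeddings(),
    persist_path="./sklearn_store.json",  # placeholder path
    serializer="json",                    # "bson" and "parquet" are also supported
)
store.persist()  # writes the vectors and texts to persist_path
```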

@@ -7,11 +7,10 @@
"source": [
"# StarRocks\n",
"\n",
"[StarRocks | A High-Performance Analytical Database](https://www.starrocks.io/)\n",
">[StarRocks](https://www.starrocks.io/) is a High-Performance Analytical Database.\n",
"`StarRocks` is a next-gen sub-second MPP database for full analytics scenarios, including multi-dimensional analytics, real-time analytics and ad-hoc query.\n",
"\n",
"StarRocks is a next-gen sub-second MPP database for full analytics scenarios, including multi-dimensional analytics, real-time analytics and ad-hoc query.\n",
"\n",
"Usually StarRocks is categorized into OLAP, and it has shown excellent performance in [ClickBench — a Benchmark For Analytical DBMS](https://benchmark.clickhouse.com/). Since it has a super-fast vectorized execution engine, it could also be used as a fast vectordb.\n",
">Usually `StarRocks` is categorized into OLAP, and it has shown excellent performance in [ClickBench — a Benchmark For Analytical DBMS](https://benchmark.clickhouse.com/). Since it has a super-fast vectorized execution engine, it could also be used as a fast vectordb.\n",
"\n",
"Here we'll show how to use the StarRocks Vector Store."
]
@@ -21,8 +20,17 @@
"id": "1685854f",
"metadata": {},
"source": [
"\n",
"## Import all used modules"
"## Setup"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "311d44bb-4aca-4f3b-8f97-5e1f29238e40",
"metadata": {},
"outputs": [],
"source": [
"#!pip install pymysql"
]
},
{
@@ -305,7 +313,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.3"
"version": "3.10.6"
}
},
"nbformat": 4,