Mirror of https://github.com/hwchase17/langchain.git (synced 2026-02-12 20:20:08 +00:00)
Compare commits: 98 commits, bagatur/rf ... langchain-
Commits:
86ca44d451 85f5d14cef f788d0982d c9619349d6 c93d9e66e4 8955bc1866 730c551819 7e9e69c758 f055f2a1e3 92ac0fc9bd
fb3df898b5 9d145b9630 22fa32e164 d3520a784f a75b32a54a 4530d851e4 ad50702934 68e0ae3286 b33d2346db f58c40b4e3
9ef93ecd7c 2115fb76de af620db9c7 398b2b9c51 7b1066341b d5b2a93c6d 57c13b4ef8 168e9ed3a5 1e750f12f6 3b3ed72d35
e1190c8f3c 2b87e330b0 aeeda370aa d2db561347 f5ff7f178b 753edf9c80 aa358f2be4 60103fc4a5 4964ba74db d90379210a
987099cfcd 0cd3f93361 5d4133d82f bcac6c3aff efb4c12abe 9ac302cb97 7ee2822ec2 3b7b933aa2 3c42bf8d97 4bb3d5c488
f824f6d925 f9aea3db07 9eda8f2fe8 86326269a1 4c97a9ee53 0deb98ac0c 75c7c3a1a7 abe7566d7d 360a70c8a8 1c2b9cc9ab
401d469a92 b108b4d010 976b456619 5da7eb97cb a7b4175091 12e0c28a6e a349fce880 7545b1d29b d5be160af0 cd6812342e
abb3066150 bf7763d9b0 59d7adff8f 60db79a38a bc4cd9c5cc cb6cf4b631 0bce28cd30 8711c61298 3ab49c0036 61daa16e5d
51e75cf59d 6a1a0d977a dd4d4411c9 b03c801523 41f7620989 066a5a209f 9b3a025f9c ad7f2ec67d bd5c92a113 a4bcb45f65
7193634ae6 1fcf875fe3 255ad39ae3 c2d43544cc 3c917204dc 8698cb9b28 710197e18c 48d6ea427f
11 .github/scripts/check_diff.py (vendored)
@@ -15,6 +15,10 @@ LANGCHAIN_DIRS = [
     "libs/experimental",
 ]

+def all_package_dirs() -> Set[str]:
+    return {"/".join(path.split("/")[:-1]) for path in glob.glob("./libs/**/pyproject.toml", recursive=True)}
+
+
 def dependents_graph() -> dict:
     dependents = defaultdict(set)

@@ -53,10 +57,11 @@ if __name__ == "__main__":
     }
     docs_edited = False

-    if len(files) == 300:
+    if len(files) >= 300:
         # max diff length is 300 files - there are likely files missing
         raise ValueError("Max diff reached. Please manually run CI on changed libs.")

+    dirs_to_run["lint"] = all_package_dirs()
     dirs_to_run["test"] = all_package_dirs()
     dirs_to_run["extended-test"] = set(LANGCHAIN_DIRS)
     for file in files:
         if any(
             file.startswith(dir_)
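For illustration, the set comprehension in `all_package_dirs` simply strips the filename from each matched path (the path below is hypothetical, shown only to make the one-liner concrete):

path = "./libs/partners/openai/pyproject.toml"  # hypothetical glob match
print("/".join(path.split("/")[:-1]))           # -> ./libs/partners/openai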
6 .github/workflows/_release.yml (vendored)
@@ -202,7 +202,7 @@ jobs:
       poetry run python -c "import $IMPORT_NAME; print(dir($IMPORT_NAME))"

   - name: Import test dependencies
-    run: poetry install --with test,test_integration
+    run: poetry install --with test
     working-directory: ${{ inputs.working-directory }}

   # Overwrite the local version of the package with the test PyPI version.

@@ -245,6 +245,10 @@ jobs:
     with:
       credentials_json: '${{ secrets.GOOGLE_CREDENTIALS }}'

+  - name: Import integration test dependencies
+    run: poetry install --with test,test_integration
+    working-directory: ${{ inputs.working-directory }}
+
   - name: Run integration tests
     if: ${{ startsWith(inputs.working-directory, 'libs/partners/') }}
     env:
@@ -123,7 +123,7 @@ Please see [here](https://python.langchain.com) for full documentation, which in
 - [🦜🛠️ LangSmith](https://docs.smith.langchain.com/): Tracing and evaluating your language model applications and intelligent agents to help you move from prototype to production.
 - [🦜🕸️ LangGraph](https://langchain-ai.github.io/langgraph/): Creating stateful, multi-actor applications with LLMs, built on top of (and intended to be used with) LangChain primitives.
 - [🦜🏓 LangServe](https://python.langchain.com/docs/langserve): Deploying LangChain runnables and chains as REST APIs.
-- [LangChain Templates](https://python.langchain.com/v0.2/docs/templates/): Example applications hosted with LangServe.
+- [LangChain Templates](https://python.langchain.com/v0.2/docs/templates/): Example applications hosted with LangServe.


 ## 💁 Contributing
File diff suppressed because one or more lines are too long
@@ -10,12 +10,21 @@ from pathlib import Path
 from typing import Dict, List, Literal, Optional, Sequence, TypedDict, Union

 import toml
+import typing_extensions
+from langchain_core.runnables import Runnable, RunnableSerializable
 from pydantic import BaseModel

 ROOT_DIR = Path(__file__).parents[2].absolute()
 HERE = Path(__file__).parent

-ClassKind = Literal["TypedDict", "Regular", "Pydantic", "enum"]
+ClassKind = Literal[
+    "TypedDict",
+    "Regular",
+    "Pydantic",
+    "enum",
+    "RunnablePydantic",
+    "RunnableNonPydantic",
+]


 class ClassInfo(TypedDict):

@@ -69,8 +78,36 @@ def _load_module_members(module_path: str, namespace: str) -> ModuleMembers:
             continue

         if inspect.isclass(type_):
-            if type(type_) == typing._TypedDictMeta:  # type: ignore
+            # The clasification of the class is used to select a template
+            # for the object when rendering the documentation.
+            # See `templates` directory for defined templates.
+            # This is a hacky solution to distinguish between different
+            # kinds of thing that we want to render.
+            if type(type_) is typing_extensions._TypedDictMeta:  # type: ignore
                 kind: ClassKind = "TypedDict"
+            elif type(type_) is typing._TypedDictMeta:  # type: ignore
+                kind: ClassKind = "TypedDict"
+            elif (
+                issubclass(type_, Runnable)
+                and issubclass(type_, BaseModel)
+                and type_ is not Runnable
+            ):
+                # RunnableSerializable subclasses from Pydantic which
+                # for which we use autodoc_pydantic for rendering.
+                # We need to distinguish these from regular Pydantic
+                # classes so we can hide inherited Runnable methods
+                # and provide a link to the Runnable interface from
+                # the template.
+                kind = "RunnablePydantic"
+            elif (
+                issubclass(type_, Runnable)
+                and not issubclass(type_, BaseModel)
+                and type_ is not Runnable
+            ):
+                # These are not pydantic classes but are Runnable.
+                # We'll hide all the inherited methods from Runnable
+                # but use a regular class template to render.
+                kind = "RunnableNonPydantic"
             elif issubclass(type_, Enum):
                 kind = "enum"
             elif issubclass(type_, BaseModel):

@@ -251,6 +288,10 @@ Classes
             template = "enum.rst"
         elif class_["kind"] == "Pydantic":
             template = "pydantic.rst"
+        elif class_["kind"] == "RunnablePydantic":
+            template = "runnable_pydantic.rst"
+        elif class_["kind"] == "RunnableNonPydantic":
+            template = "runnable_non_pydantic.rst"
         else:
             template = "class.rst"
File diff suppressed because one or more lines are too long
@@ -33,4 +33,4 @@
 {% endblock %}


-.. example_links:: {{ objname }}
+.. example_links:: {{ objname }}
@@ -15,6 +15,8 @@
     :member-order: groupwise
     :show-inheritance: True
     :special-members: __call__
+    :exclude-members: construct, copy, dict, from_orm, parse_file, parse_obj, parse_raw, schema, schema_json, update_forward_refs, validate, json, is_lc_serializable, to_json, to_json_not_implemented, lc_secrets, lc_attributes, lc_id, get_lc_namespace
+

 {% block attributes %}
 {% endblock %}
39 docs/api_reference/templates/runnable_non_pydantic.rst (new file)
@@ -0,0 +1,39 @@
:mod:`{{module}}`.{{objname}}
{{ underline }}==============

.. NOTE:: {{objname}} implements the standard :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>`. 🏃


.. currentmodule:: {{ module }}

.. autoclass:: {{ objname }}

{% block attributes %}
{% if attributes %}
.. rubric:: {{ _('Attributes') }}

.. autosummary::
{% for item in attributes %}
   ~{{ name }}.{{ item }}
{%- endfor %}
{% endif %}
{% endblock %}

{% block methods %}
{% if methods %}
.. rubric:: {{ _('Methods') }}

.. autosummary::
{% for item in methods %}
   ~{{ name }}.{{ item }}
{%- endfor %}

{% for item in methods %}
.. automethod:: {{ name }}.{{ item }}
{%- endfor %}

{% endif %}
{% endblock %}


.. example_links:: {{ objname }}
22 docs/api_reference/templates/runnable_pydantic.rst (new file)
@@ -0,0 +1,22 @@
:mod:`{{module}}`.{{objname}}
{{ underline }}==============

.. NOTE:: {{objname}} implements the standard :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>`. 🏃

.. currentmodule:: {{ module }}

.. autopydantic_model:: {{ objname }}
    :model-show-json: False
    :model-show-config-summary: False
    :model-show-validator-members: False
    :model-show-field-summary: False
    :field-signature-prefix: param
    :members:
    :undoc-members:
    :inherited-members:
    :member-order: groupwise
    :show-inheritance: True
    :special-members: __call__
    :exclude-members: construct, copy, dict, from_orm, parse_file, parse_obj, parse_raw, schema, schema_json, update_forward_refs, validate, json, is_lc_serializable, to_json, to_json_not_implemented, lc_secrets, lc_attributes, lc_id, get_lc_namespace, invoke, ainvoke, batch, abatch, batch_as_completed, abatch_as_completed, astream_log, stream, astream, astream_events, transform, atransform, get_output_schema, get_prompts, configurable_fields, configurable_alternatives, config_schema, map, pick, pipe, with_listeners, with_alisteners, with_config, with_fallbacks, with_types, with_retry, InputType, OutputType, config_specs, output_schema, get_input_schema, get_graph, get_name, input_schema, name, bind, assign

.. example_links:: {{ objname }}
@@ -2,132 +2,129 @@
{%- set url_root = pathto('', 1) %}
{%- if url_root == '#' %}{% set url_root = '' %}{% endif %}
{%- if not embedded and docstitle %}
{%- set titlesuffix = " — "|safe + docstitle|e %}
{%- set titlesuffix = " — "|safe + docstitle|e %}
{%- else %}
{%- set titlesuffix = "" %}
{%- set titlesuffix = "" %}
{%- endif %}
{%- set lang_attr = 'en' %}

<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="{{ lang_attr }}" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="{{ lang_attr }}" > <!--<![endif]-->
<!--[if gt IE 8]><!-->
<html class="no-js" lang="{{ lang_attr }}"> <!--<![endif]-->
<head>
<meta charset="utf-8">
{{ metatags }}
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta charset="utf-8">
{{ metatags }}
<meta name="viewport" content="width=device-width, initial-scale=1.0">

{% block htmltitle %}
<title>{{ title|striptags|e }}{{ titlesuffix }}</title>
{% endblock %}
<link rel="canonical" href="https://api.python.langchain.com/en/latest/{{pagename}}.html" />
{% block htmltitle %}
<title>{{ title|striptags|e }}{{ titlesuffix }}</title>
{% endblock %}
<link rel="canonical"
href="https://api.python.langchain.com/en/latest/{{ pagename }}.html"/>

{% if favicon_url %}
<link rel="shortcut icon" href="{{ favicon_url|e }}"/>
{% endif %}
{% if favicon_url %}
<link rel="shortcut icon" href="{{ favicon_url|e }}"/>
{% endif %}

<link rel="stylesheet" href="{{ pathto('_static/css/vendor/bootstrap.min.css', 1) }}" type="text/css" />
{%- for css in css_files %}
{%- if css|attr("rel") %}
<link rel="{{ css.rel }}" href="{{ pathto(css.filename, 1) }}" type="text/css"{% if css.title is not none %} title="{{ css.title }}"{% endif %} />
{%- else %}
<link rel="stylesheet" href="{{ pathto(css, 1) }}" type="text/css" />
{%- endif %}
{%- endfor %}
<link rel="stylesheet" href="{{ pathto('_static/' + style, 1) }}" type="text/css" />
<script id="documentation_options" data-url_root="{{ pathto('', 1) }}" src="{{ pathto('_static/documentation_options.js', 1) }}"></script>
<script src="{{ pathto('_static/jquery.js', 1) }}"></script>
{%- block extrahead %} {% endblock %}
<link rel="stylesheet"
href="{{ pathto('_static/css/vendor/bootstrap.min.css', 1) }}"
type="text/css"/>
{%- for css in css_files %}
{%- if css|attr("rel") %}
<link rel="{{ css.rel }}" href="{{ pathto(css.filename, 1) }}"
type="text/css"{% if css.title is not none %}
title="{{ css.title }}"{% endif %} />
{%- else %}
<link rel="stylesheet" href="{{ pathto(css, 1) }}" type="text/css"/>
{%- endif %}
{%- endfor %}
<link rel="stylesheet" href="{{ pathto('_static/' + style, 1) }}" type="text/css"/>
<script id="documentation_options" data-url_root="{{ pathto('', 1) }}"
src="{{ pathto('_static/documentation_options.js', 1) }}"></script>
<script src="{{ pathto('_static/jquery.js', 1) }}"></script>
{%- block extrahead %} {% endblock %}
</head>
<body>
{% include "nav.html" %}
{%- block content %}
<div class="d-flex" id="sk-doc-wrapper">
<input type="checkbox" name="sk-toggle-checkbox" id="sk-toggle-checkbox">
<label id="sk-sidemenu-toggle" class="sk-btn-toggle-toc btn sk-btn-primary" for="sk-toggle-checkbox">Toggle Menu</label>
<div id="sk-sidebar-wrapper" class="border-right">
<div class="sk-sidebar-toc-wrapper">
<div class="btn-group w-100 mb-2" role="group" aria-label="rellinks">
{%- if prev %}
<a href="{{ prev.link|e }}" role="button" class="btn sk-btn-rellink py-1" sk-rellink-tooltip="{{ prev.title|striptags }}">Prev</a>
{%- else %}
<a href="#" role="button" class="btn sk-btn-rellink py-1 disabled"">Prev</a>
{%- endif %}
{%- if parents -%}
<a href="{{ parents[-1].link|e }}" role="button" class="btn sk-btn-rellink py-1" sk-rellink-tooltip="{{ parents[-1].title|striptags }}">Up</a>
{%- else %}
<a href="#" role="button" class="btn sk-btn-rellink disabled py-1">Up</a>
{%- endif %}
{%- if next %}
<a href="{{ next.link|e }}" role="button" class="btn sk-btn-rellink py-1" sk-rellink-tooltip="{{ next.title|striptags }}">Next</a>
{%- else %}
<a href="#" role="button" class="btn sk-btn-rellink py-1 disabled"">Next</a>
{%- endif %}
<div class="d-flex" id="sk-doc-wrapper">
<input type="checkbox" name="sk-toggle-checkbox" id="sk-toggle-checkbox">
<label id="sk-sidemenu-toggle" class="sk-btn-toggle-toc btn sk-btn-primary"
for="sk-toggle-checkbox">Toggle Menu</label>
<div id="sk-sidebar-wrapper" class="border-right">
<div class="sk-sidebar-toc-wrapper">
{%- if meta and meta['parenttoc']|tobool %}
<div class="sk-sidebar-toc">
{% set nav = get_nav_object(maxdepth=3, collapse=True, numbered=True) %}
<ul>
{% for main_nav_item in nav %}
{% if main_nav_item.active %}
<li>
<a href="{{ main_nav_item.url }}"
class="sk-toc-active">{{ main_nav_item.title }}</a>
</li>
<ul>
{% for nav_item in main_nav_item.children %}
<li>
<a href="{{ nav_item.url }}"
class="{% if nav_item.active %}sk-toc-active{% endif %}">{{ nav_item.title }}</a>
{% if nav_item.children %}
<ul>
{% for inner_child in nav_item.children %}
<li class="sk-toctree-l3">
<a href="{{ inner_child.url }}">{{ inner_child.title }}</a>
</li>
{% endfor %}
</ul>
{% endif %}
</li>
{% endfor %}
</ul>
{% endif %}
{% endfor %}
</ul>
</div>
{%- elif meta and meta['globalsidebartoc']|tobool %}
<div class="sk-sidebar-toc sk-sidebar-global-toc">
{{ toctree(maxdepth=2, titles_only=True) }}
</div>
{%- else %}
<div class="sk-sidebar-toc">
{{ toc }}
</div>
{%- endif %}
</div>
</div>
{%- if meta and meta['parenttoc']|tobool %}
<div class="sk-sidebar-toc">
{% set nav = get_nav_object(maxdepth=3, collapse=True, numbered=True) %}
<ul>
{% for main_nav_item in nav %}
{% if main_nav_item.active %}
<li>
<a href="{{ main_nav_item.url }}" class="sk-toc-active">{{ main_nav_item.title }}</a>
</li>
<ul>
{% for nav_item in main_nav_item.children %}
<li>
<a href="{{ nav_item.url }}" class="{% if nav_item.active %}sk-toc-active{% endif %}">{{ nav_item.title }}</a>
{% if nav_item.children %}
<ul>
{% for inner_child in nav_item.children %}
<li class="sk-toctree-l3">
<a href="{{ inner_child.url }}">{{ inner_child.title }}</a>
</li>
{% endfor %}
</ul>
{% endif %}
</li>
{% endfor %}
</ul>
{% endif %}
{% endfor %}
</ul>
<div id="sk-page-content-wrapper">
<div class="sk-page-content container-fluid body px-md-3" role="main">
{% block body %}{% endblock %}
</div>
{%- elif meta and meta['globalsidebartoc']|tobool %}
<div class="sk-sidebar-toc sk-sidebar-global-toc">
{{ toctree(maxdepth=2, titles_only=True) }}
<div class="container">
<footer class="sk-content-footer">
{%- if pagename != 'index' %}
{%- if show_copyright %}
{%- if hasdoc('copyright') %}
{% trans path=pathto('copyright'), copyright=copyright|e %}
© {{ copyright }}.{% endtrans %}
{%- else %}
{% trans copyright=copyright|e %}© {{ copyright }}
.{% endtrans %}
{%- endif %}
{%- endif %}
{%- if last_updated %}
{% trans last_updated=last_updated|e %}Last updated
on {{ last_updated }}.{% endtrans %}
{%- endif %}
{%- if show_source and has_source and sourcename %}
<a href="{{ pathto('_sources/' + sourcename, true)|e }}"
rel="nofollow">{{ _('Show this page source') }}</a>
{%- endif %}
{%- endif %}
</footer>
</div>
{%- else %}
<div class="sk-sidebar-toc">
{{ toc }}
</div>
{%- endif %}
</div>
</div>
</div>
<div id="sk-page-content-wrapper">
<div class="sk-page-content container-fluid body px-md-3" role="main">
{% block body %}{% endblock %}
</div>
<div class="container">
<footer class="sk-content-footer">
{%- if pagename != 'index' %}
{%- if show_copyright %}
{%- if hasdoc('copyright') %}
{% trans path=pathto('copyright'), copyright=copyright|e %}© {{ copyright }}.{% endtrans %}
{%- else %}
{% trans copyright=copyright|e %}© {{ copyright }}.{% endtrans %}
{%- endif %}
{%- endif %}
{%- if last_updated %}
{% trans last_updated=last_updated|e %}Last updated on {{ last_updated }}.{% endtrans %}
{%- endif %}
{%- if show_source and has_source and sourcename %}
<a href="{{ pathto('_sources/' + sourcename, true)|e }}" rel="nofollow">{{ _('Show this page source') }}</a>
{%- endif %}
{%- endif %}
</footer>
</div>
</div>
</div>
{%- endblock %}
<script src="{{ pathto('_static/js/vendor/bootstrap.min.js', 1) }}"></script>
{% include "javascript.html" %}
@@ -144,8 +144,19 @@ LangChain does not host any Chat Models, rather we rely on third party integrati

 We have some standardized parameters when constructing ChatModels:
 - `model`: the name of the model
 - `temperature`: the sampling temperature
 - `timeout`: request timeout
 - `max_tokens`: max tokens to generate
 - `stop`: default stop sequences
 - `max_retries`: max number of times to retry requests
 - `api_key`: API key for the model provider
 - `base_url`: endpoint to send requests to

-ChatModels also accept other parameters that are specific to that integration.
+Some important things to note:
+- standard params only apply to model providers that expose parameters with the intended functionality. For example, some providers do not expose a configuration for maximum output tokens, so max_tokens can't be supported on these.
+- standard params are currently only enforced on integrations that have their own integration packages (e.g. `langchain-openai`, `langchain-anthropic`, etc.), they're not enforced on models in ``langchain-community``.
+
+ChatModels also accept other parameters that are specific to that integration. To find all the parameters supported by a ChatModel head to the API reference for that model.
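In practice these standard parameters map directly onto constructor arguments. A minimal sketch (assuming `langchain-openai` is installed; the key and stop sequence below are placeholders):

from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    model="gpt-3.5-turbo-0125",  # the name of the model
    temperature=0,               # sampling temperature
    timeout=30,                  # request timeout in seconds
    max_tokens=256,              # max tokens to generate
    stop=["\n\n"],               # default stop sequences (placeholder)
    max_retries=2,               # max number of times to retry requests
    api_key="...",               # API key for the model provider (placeholder)
    base_url="https://api.openai.com/v1",  # endpoint to send requests to
)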
 :::important
 **Tool Calling** Some chat models have been fine-tuned for tool calling and provide a dedicated API for tool calling.
@@ -168,8 +179,15 @@ For a full list of LangChain model providers with multimodal models, [check out
 ### LLMs
 <span data-heading-keywords="llm,llms"></span>

+:::caution
+Pure text-in/text-out LLMs tend to be older or lower-level. Many popular models are best used as [chat completion models](/docs/concepts/#chat-models),
+even for non-chat use cases.
+
+You are probably looking for [the section above instead](/docs/concepts/#chat-models).
+:::
+
 Language models that takes a string as input and returns a string.
-These are traditionally older models (newer models generally are [Chat Models](/docs/concepts/#chat-models), see below).
+These are traditionally older models (newer models generally are [Chat Models](/docs/concepts/#chat-models), see above).

 Although the underlying models are string in, string out, the LangChain wrappers also allow these models to take messages as input.
 This gives them the same interface as [Chat Models](/docs/concepts/#chat-models).
@@ -857,7 +875,7 @@ The standard interface consists of:
 The following how-to guides are good practical resources for using function/tool calling:

 - [How to return structured data from an LLM](/docs/how_to/structured_output/)
-- [How to use a model to call tools](/docs/how_to/tool_calling/)
+- [How to use a model to call tools](/docs/how_to/tool_calling)

 For a full list of model providers that support tool calling, [see this table](/docs/integrations/chat/#advanced-features).
@@ -23,7 +23,7 @@
 "This guide assumes familiarity with the following concepts:\n",
 "- [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language)\n",
 "- [Chaining runnables](/docs/how_to/sequence/)\n",
-"- [Tool calling](/docs/how_to/tool_calling/)\n",
+"- [Tool calling](/docs/how_to/tool_calling)\n",
 "\n",
 ":::\n",
 "\n",

@@ -142,7 +142,7 @@
 "\n",
 "## Attaching OpenAI tools\n",
 "\n",
-"Another common use-case is tool calling. While you should generally use the [`.bind_tools()`](/docs/how_to/tool_calling/) method for tool-calling models, you can also bind provider-specific args directly if you want lower level control:"
+"Another common use-case is tool calling. While you should generally use the [`.bind_tools()`](/docs/how_to/tool_calling) method for tool-calling models, you can also bind provider-specific args directly if you want lower level control:"
 ]
 },
 {
File diff suppressed because one or more lines are too long
@@ -69,6 +69,17 @@
 "Once we have loaded PDFs into LangChain `Document` objects, we can index them (e.g., a RAG application) in the usual way:"
 ]
 },
+{
+"cell_type": "code",
+"execution_count": null,
+"id": "c3b932bb",
+"metadata": {},
+"outputs": [],
+"source": [
+"%pip install faiss-cpu \n",
+"# use `pip install faiss-gpu` for CUDA GPU support"
+]
+},
 {
 "cell_type": "code",
 "execution_count": null,
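With the extra dependency installed, indexing the loaded pages follows the usual vectorstore pattern. A minimal sketch (assuming the loaded `Document` objects are in a list named `docs` and an OpenAI API key is configured; both names are assumptions for illustration):

from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings

# Embed the loaded documents and store them in an in-memory FAISS index.
vectorstore = FAISS.from_documents(docs, OpenAIEmbeddings())

# Expose the index as a retriever for RAG-style lookups.
retriever = vectorstore.as_retriever()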
@@ -300,7 +300,11 @@
 "id": "922b48bd",
 "metadata": {},
 "source": [
-"# Streaming\n",
+"## Streaming\n",
 "\n",
+":::{.callout-note}\n",
+"[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) is best suited for code that does not need to support streaming. If you need to support streaming (i.e., be able to operate on chunks of inputs and yield chunks of outputs), use [RunnableGenerator](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableGenerator.html) instead as in the example below.\n",
+":::\n",
+"\n",
 "You can use generator functions (ie. functions that use the `yield` keyword, and behave like iterators) in a chain.\n",
 "\n",
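As a reference point, wrapping a generator transform with RunnableGenerator looks roughly like this (a minimal sketch; the transform function and input are illustrative):

from typing import Iterator

from langchain_core.runnables import RunnableGenerator


def upper_chunks(chunks: Iterator[str]) -> Iterator[str]:
    # Consume input chunks as they arrive and yield output chunks immediately,
    # which is what lets the wrapped runnable participate in streaming.
    for chunk in chunks:
        yield chunk.upper()


runnable = RunnableGenerator(upper_chunks)
for part in runnable.stream("hello"):
    print(part)  # -> HELLO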
@@ -21,7 +21,7 @@ For comprehensive descriptions of every class and function see the [API Referenc
 This highlights functionality that is core to using LangChain.

 - [How to: return structured data from a model](/docs/how_to/structured_output/)
-- [How to: use a model to call tools](/docs/how_to/tool_calling/)
+- [How to: use a model to call tools](/docs/how_to/tool_calling)
 - [How to: stream runnables](/docs/how_to/streaming)
 - [How to: debug your LLM apps](/docs/how_to/debugging/)

@@ -79,6 +79,12 @@ These are the core building blocks you can use when building applications.
 - [How to: stream a response back](/docs/how_to/chat_streaming)
 - [How to: track token usage](/docs/how_to/chat_token_usage_tracking)
 - [How to: track response metadata across providers](/docs/how_to/response_metadata)
-- [How to: let your end users choose their model](/docs/how_to/chat_models_universal_init/)
+- [How to: use chat model to call tools](/docs/how_to/tool_calling)
+- [How to: stream tool calls](/docs/how_to/tool_streaming)
+- [How to: few shot prompt tool behavior](/docs/how_to/tools_few_shot)
+- [How to: bind model-specific formated tools](/docs/how_to/tools_model_specific)
+- [How to: force specific tool call](/docs/how_to/tool_choice)
+- [How to: init any model in one line](/docs/how_to/chat_models_universal_init/)

 ### Messages

@@ -176,15 +182,17 @@ Indexing is the process of keeping your vectorstore in-sync with the underlying

 ### Tools

-LangChain [Tools](/docs/concepts/#tools) contain a description of the tool (to pass to the language model) as well as the implementation of the function to call).
+LangChain [Tools](/docs/concepts/#tools) contain a description of the tool (to pass to the language model) as well as the implementation of the function to call.

 - [How to: create custom tools](/docs/how_to/custom_tools)
 - [How to: use built-in tools and built-in toolkits](/docs/how_to/tools_builtin)
-- [How to: use a chat model to call tools](/docs/how_to/tool_calling/)
+- [How to: use chat model to call tools](/docs/how_to/tool_calling)
+- [How to: pass tool results back to model](/docs/how_to/tool_results_pass_to_model)
 - [How to: add ad-hoc tool calling capability to LLMs and chat models](/docs/how_to/tools_prompting)
 - [How to: pass run time values to tools](/docs/how_to/tool_runtime)
 - [How to: add a human in the loop to tool usage](/docs/how_to/tools_human)
 - [How to: handle errors when calling tools](/docs/how_to/tools_error)
+- [How to: disable parallel tool calling](/docs/how_to/tool_choice)

 ### Multimodal

@@ -225,6 +233,8 @@ All of LangChain components can easily be extended to support your own versions.
 - [How to: create custom callback handlers](/docs/how_to/custom_callbacks)
 - [How to: define a custom tool](/docs/how_to/custom_tools)

+### Serialization
+- [How to: save and load LangChain objects](/docs/how_to/serialization)

 ## Use cases

@@ -315,4 +325,4 @@ You can peruse [LangSmith how-to guides here](https://docs.smith.langchain.com/h
 Evaluating performance is a vital part of building LLM-powered applications.
 LangSmith helps with every step of the process from creating a dataset to defining metrics to running evaluators.

-To learn more, check out the [LangSmith evaluation how-to guides](https://docs.smith.langchain.com/how_to_guides/evaluation).
+To learn more, check out the [LangSmith evaluation how-to guides](https://docs.smith.langchain.com/how_to_guides#evaluation).
@@ -129,7 +129,7 @@
 "id": "a531da5e",
 "metadata": {},
 "source": [
-"## What is the runnable you are trying wrap?\n",
+"## What is the runnable you are trying to wrap?\n",
 "\n",
 "`RunnableWithMessageHistory` can only wrap certain types of Runnables. Specifically, it can be used for any Runnable that takes as input one of:\n",
 "\n",
@@ -52,7 +52,12 @@
 " (\"system\", \"Describe the image provided\"),\n",
 " (\n",
 " \"user\",\n",
-" [{\"type\": \"image_url\", \"image_url\": \"data:image/jpeg;base64,{image_data}\"}],\n",
+" [\n",
+" {\n",
+" \"type\": \"image_url\",\n",
+" \"image_url\": {\"url\": \"data:image/jpeg;base64,{image_data}\"},\n",
+" }\n",
+" ],\n",
 " ),\n",
 " ]\n",
 ")"

@@ -110,11 +115,11 @@
 " [\n",
 " {\n",
 " \"type\": \"image_url\",\n",
-" \"image_url\": \"data:image/jpeg;base64,{image_data1}\",\n",
+" \"image_url\": {\"url\": \"data:image/jpeg;base64,{image_data1}\"},\n",
 " },\n",
 " {\n",
 " \"type\": \"image_url\",\n",
-" \"image_url\": \"data:image/jpeg;base64,{image_data2}\",\n",
+" \"image_url\": {\"url\": \"data:image/jpeg;base64,{image_data2}\"},\n",
 " },\n",
 " ],\n",
 " ),\n",
305 docs/docs/how_to/serialization.ipynb (new file)
@@ -0,0 +1,305 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "ab3dc782-321e-4503-96ee-ac88a15e4b5e",
   "metadata": {},
   "source": [
    "# How to save and load LangChain objects\n",
    "\n",
    "LangChain classes implement standard methods for serialization. Serializing LangChain objects using these methods confer some advantages:\n",
    "\n",
    "- Secrets, such as API keys, are separated from other parameters and can be loaded back to the object on de-serialization;\n",
    "- De-serialization is kept compatible across package versions, so objects that were serialized with one version of LangChain can be properly de-serialized with another.\n",
    "\n",
    "To save and load LangChain objects using this system, use the `dumpd`, `dumps`, `load`, and `loads` functions in the [load module](https://api.python.langchain.com/en/latest/core_api_reference.html#module-langchain_core.load) of `langchain-core`. These functions support JSON and JSON-serializable objects.\n",
    "\n",
    "All LangChain objects that inherit from [Serializable](https://api.python.langchain.com/en/latest/load/langchain_core.load.serializable.Serializable.html) are JSON-serializable. Examples include [messages](https://api.python.langchain.com/en/latest/core_api_reference.html#module-langchain_core.messages), [document objects](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) (e.g., as returned from [retrievers](/docs/concepts/#retrievers)), and most [Runnables](/docs/concepts/#langchain-expression-language-lcel), such as chat models, retrievers, and [chains](/docs/how_to/sequence) implemented with the LangChain Expression Language.\n",
    "\n",
    "Below we walk through an example with a simple [LLM chain](/docs/tutorials/llm_chain).\n",
    "\n",
    ":::{.callout-caution}\n",
    "\n",
    "De-serialization using `load` and `loads` can instantiate any serializable LangChain object. Only use this feature with trusted inputs!\n",
    "\n",
    "De-serialization is a beta feature and is subject to change.\n",
    ":::"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "f85d9e51-2a36-4f69-83b1-c716cd43f790",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_core.load import dumpd, dumps, load, loads\n",
    "from langchain_core.prompts import ChatPromptTemplate\n",
    "from langchain_openai import ChatOpenAI\n",
    "\n",
    "prompt = ChatPromptTemplate.from_messages(\n",
    "    [\n",
    "        (\"system\", \"Translate the following into {language}:\"),\n",
    "        (\"user\", \"{text}\"),\n",
    "    ],\n",
    ")\n",
    "\n",
    "llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", api_key=\"llm-api-key\")\n",
    "\n",
    "chain = prompt | llm"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "356ea99f-5cb5-4433-9a6c-2443d2be9ed3",
   "metadata": {},
   "source": [
    "## Saving objects\n",
    "\n",
    "### To json"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "26516764-d46b-4357-a6c6-bd8315bfa530",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{\n",
      "  \"lc\": 1,\n",
      "  \"type\": \"constructor\",\n",
      "  \"id\": [\n",
      "    \"langchain\",\n",
      "    \"schema\",\n",
      "    \"runnable\",\n",
      "    \"RunnableSequence\"\n",
      "  ],\n",
      "  \"kwargs\": {\n",
      "    \"first\": {\n",
      "      \"lc\": 1,\n",
      "      \"type\": \"constructor\",\n",
      "      \"id\": [\n",
      "        \"langchain\",\n",
      "        \"prompts\",\n",
      "        \"chat\",\n",
      "        \"ChatPromptTemplate\"\n",
      "      ],\n",
      "      \"kwargs\": {\n",
      "        \"input_variables\": [\n",
      "          \"language\",\n",
      "          \"text\"\n",
      "        ],\n",
      "        \"messages\": [\n",
      "          {\n",
      "            \"lc\": 1,\n",
      "            \"type\": \"constructor\",\n",
      "            \n"
     ]
    }
   ],
   "source": [
    "string_representation = dumps(chain, pretty=True)\n",
    "print(string_representation[:500])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "bd425716-545d-466b-a4e5-dc9952cfd72a",
   "metadata": {},
   "source": [
    "### To a json-serializable Python dict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "6561a968-1741-4419-8c29-e705b9d0ef39",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'dict'>\n"
     ]
    }
   ],
   "source": [
    "dict_representation = dumpd(chain)\n",
    "\n",
    "print(type(dict_representation))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "711e986e-dd24-4839-9e38-c57903378a5f",
   "metadata": {},
   "source": [
    "### To disk"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "f818378b-f4d6-43a7-895b-76cf7359b157",
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "\n",
    "with open(\"/tmp/chain.json\", \"w\") as fp:\n",
    "    json.dump(string_representation, fp)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1e621a32-ff5f-4627-ad59-88cacba73c6b",
   "metadata": {},
   "source": [
    "Note that the API key is withheld from the serialized representations. Parameters that are considered secret are specified by the `.lc_secrets` attribute of the LangChain object:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "8225e150-000a-4fbc-9f3d-09568f4b560b",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'openai_api_key': 'OPENAI_API_KEY'}"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "chain.last.lc_secrets"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6d090177-eb1c-4bfb-8c13-29286afe17d9",
   "metadata": {},
   "source": [
    "## Loading objects\n",
    "\n",
    "Specifying `secrets_map` in `load` and `loads` will load the corresponding secrets onto the de-serialized LangChain object.\n",
    "\n",
    "### From string"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "54a66267-5f3a-40a2-bfcc-8b44bb24c154",
   "metadata": {},
   "outputs": [],
   "source": [
    "chain = loads(string_representation, secrets_map={\"OPENAI_API_KEY\": \"llm-api-key\"})"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5ed9aff1-92cc-44ba-b2ec-4d12f924fa03",
   "metadata": {},
   "source": [
    "### From dict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "76979932-13de-4427-9f88-040fb05a6778",
   "metadata": {},
   "outputs": [],
   "source": [
    "chain = load(dict_representation, secrets_map={\"OPENAI_API_KEY\": \"llm-api-key\"})"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7dd81a2a-5163-414d-ab42-f1c35e30471b",
   "metadata": {},
   "source": [
    "### From disk"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "033f62a7-3377-472a-be58-718baa6ab445",
   "metadata": {},
   "outputs": [],
   "source": [
    "with open(\"/tmp/chain.json\", \"r\") as fp:\n",
    "    chain = loads(json.load(fp), secrets_map={\"OPENAI_API_KEY\": \"llm-api-key\"})"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "dc520fdb-035a-468f-a8a8-c3ffe8ed98eb",
   "metadata": {},
   "source": [
    "Note that we recover the API key specified at the start of the guide:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "566b2475-d9b4-432b-8c3b-27c2f183624e",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'llm-api-key'"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "chain.last.openai_api_key.get_secret_value()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7b4cba53-e1d5-4979-927e-b5794a02afc3",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
@@ -351,7 +351,7 @@
 "id": "ab1b2e7c-6ea8-4674-98eb-a43c69f5c19d",
 "metadata": {},
 "source": [
-"To help enforce proper use of our Python tool, we'll using [tool calling](/docs/how_to/tool_calling/):"
+"To help enforce proper use of our Python tool, we'll using [tool calling](/docs/how_to/tool_calling):"
 ]
 },
 {
@@ -243,7 +243,7 @@
 "text": [
 "================================\u001b[1m System Message \u001b[0m================================\n",
 "\n",
-"You are a \u001b[33;1m\u001b[1;3m{dialect}\u001b[0m expert. Given an input question, creat a syntactically correct \u001b[33;1m\u001b[1;3m{dialect}\u001b[0m query to run.\n",
+"You are a \u001b[33;1m\u001b[1;3m{dialect}\u001b[0m expert. Given an input question, create a syntactically correct \u001b[33;1m\u001b[1;3m{dialect}\u001b[0m query to run.\n",
 "Unless the user specifies in the question a specific number of examples to obtain, query for at most \u001b[33;1m\u001b[1;3m{top_k}\u001b[0m results using the LIMIT clause as per \u001b[33;1m\u001b[1;3m{dialect}\u001b[0m. You can order the results to return the most informative data in the database.\n",
 "Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (\") to denote them as delimited identifiers.\n",
 "Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.\n",

@@ -275,7 +275,7 @@
 }
 ],
 "source": [
-"system = \"\"\"You are a {dialect} expert. Given an input question, creat a syntactically correct {dialect} query to run.\n",
+"system = \"\"\"You are a {dialect} expert. Given an input question, create a syntactically correct {dialect} query to run.\n",
 "Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per {dialect}. You can order the results to return the most informative data in the database.\n",
 "Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (\") to denote them as delimited identifiers.\n",
 "Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.\n",
@@ -250,7 +250,7 @@
 "id": "e28c14d3",
 "metadata": {},
 "source": [
-"Alternatively, you can use tool calling directly to allow the model to choose between options, if your [chosen model supports it](/docs/integrations/chat/). This involves a bit more parsing and setup but in some instances leads to better performance because you don't have to use nested schemas. See [this how-to guide](/docs/how_to/tool_calling/) for more details."
+"Alternatively, you can use tool calling directly to allow the model to choose between options, if your [chosen model supports it](/docs/integrations/chat/). This involves a bit more parsing and setup but in some instances leads to better performance because you don't have to use nested schemas. See [this how-to guide](/docs/how_to/tool_calling) for more details."
 ]
 },
 {
@@ -52,8 +52,13 @@
 "support variants of a tool calling feature.\n",
 "\n",
 "LangChain implements standard interfaces for defining tools, passing them to LLMs, \n",
-"and representing tool calls. This guide will show you how to use them.\n",
-"\n",
+"and representing tool calls. This guide and the other How-to pages in the Tool section will show you how to use tools with LangChain."
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
 "## Passing tools to chat models\n",
 "\n",
 "Chat models that support tool calling features implement a `.bind_tools` method, which \n",
@@ -67,7 +72,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 1,
+"execution_count": 8,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -98,7 +103,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 2,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -146,9 +151,17 @@
 },
 {
 "cell_type": "code",
-"execution_count": null,
+"execution_count": 9,
 "metadata": {},
-"outputs": [],
+"outputs": [
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"Note: you may need to restart the kernel to use updated packages.\n"
+]
+}
+],
 "source": [
 "# | output: false\n",
 "# | echo: false\n",
@@ -167,76 +180,13 @@
 },
 {
 "cell_type": "code",
-"execution_count": 5,
+"execution_count": 10,
 "metadata": {},
 "outputs": [],
 "source": [
 "llm_with_tools = llm.bind_tools(tools)"
 ]
 },
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-"We can also use the `tool_choice` parameter to ensure certain behavior. For example, we can force our tool to call the multiply tool by using the following code:"
-]
-},
-{
-"cell_type": "code",
-"execution_count": 9,
-"metadata": {},
-"outputs": [
-{
-"data": {
-"text/plain": [
-"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_9cViskmLvPnHjXk9tbVla5HA', 'function': {'arguments': '{\"a\":2,\"b\":4}', 'name': 'Multiply'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 103, 'total_tokens': 112}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-095b827e-2bdd-43bb-8897-c843f4504883-0', tool_calls=[{'name': 'Multiply', 'args': {'a': 2, 'b': 4}, 'id': 'call_9cViskmLvPnHjXk9tbVla5HA'}], usage_metadata={'input_tokens': 103, 'output_tokens': 9, 'total_tokens': 112})"
-]
-},
-"execution_count": 9,
-"metadata": {},
-"output_type": "execute_result"
-}
-],
-"source": [
-"llm_forced_to_multiply = llm.bind_tools(tools, tool_choice=\"Multiply\")\n",
-"llm_forced_to_multiply.invoke(\"what is 2 + 4\")"
-]
-},
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-"Even if we pass it something that doesn't require multiplcation - it will still call the tool!"
-]
-},
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-"We can also just force our tool to select at least one of our tools by passing in the \"any\" (or \"required\" which is OpenAI specific) keyword to the `tool_choice` parameter."
-]
-},
-{
-"cell_type": "code",
-"execution_count": 10,
-"metadata": {},
-"outputs": [
-{
-"data": {
-"text/plain": [
-"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_mCSiJntCwHJUBfaHZVUB2D8W', 'function': {'arguments': '{\"a\":1,\"b\":2}', 'name': 'Add'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 15, 'prompt_tokens': 94, 'total_tokens': 109}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-28f75260-9900-4bed-8cd3-f1579abb65e5-0', tool_calls=[{'name': 'Add', 'args': {'a': 1, 'b': 2}, 'id': 'call_mCSiJntCwHJUBfaHZVUB2D8W'}], usage_metadata={'input_tokens': 94, 'output_tokens': 15, 'total_tokens': 109})"
-]
-},
-"execution_count": 10,
-"metadata": {},
-"output_type": "execute_result"
-}
-],
-"source": [
-"llm_forced_to_use_tool = llm.bind_tools(tools, tool_choice=\"any\")\n",
-"llm_forced_to_use_tool.invoke(\"What day is today?\")"
-]
-},
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -265,7 +215,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 5,
+"execution_count": null,
 "metadata": {},
 "outputs": [
 {

@@ -279,9 +229,8 @@
 " 'id': 'call_Fl0hQi4IBTzlpaJYlM5kPQhE'}]"
 ]
 },
-"execution_count": 5,
 "metadata": {},
-"output_type": "execute_result"
+"output_type": "display_data"
 }
 ],
 "source": [
@@ -307,7 +256,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 6,
+"execution_count": null,
 "metadata": {},
 "outputs": [
 {

@@ -316,9 +265,8 @@
 "[Multiply(a=3, b=12), Add(a=11, b=49)]"
 ]
 },
-"execution_count": 6,
 "metadata": {},
-"output_type": "execute_result"
+"output_type": "display_data"
 }
 ],
 "source": [
@@ -328,437 +276,21 @@
"chain.invoke(query)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Streaming\n",
"\n",
"When tools are called in a streaming context, \n",
"[message chunks](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html#langchain_core.messages.ai.AIMessageChunk) \n",
"will be populated with [tool call chunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.tool.ToolCallChunk.html#langchain_core.messages.tool.ToolCallChunk) \n",
"objects in a list via the `.tool_call_chunks` attribute. A `ToolCallChunk` includes \n",
"optional string fields for the tool `name`, `args`, and `id`, and includes an optional \n",
"integer field `index` that can be used to join chunks together. Fields are optional \n",
"because portions of a tool call may be streamed across different chunks (e.g., a chunk \n",
"that includes a substring of the arguments may have null values for the tool name and id).\n",
"\n",
"Because message chunks inherit from their parent message class, an \n",
"[AIMessageChunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html#langchain_core.messages.ai.AIMessageChunk) \n",
"with tool call chunks will also include `.tool_calls` and `.invalid_tool_calls` fields. \n",
"These fields are parsed best-effort from the message's tool call chunks.\n",
"\n",
"Note that not all providers currently support streaming for tool calls:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[]\n",
"[{'name': 'Multiply', 'args': '', 'id': 'call_3aQwTP9CYlFxwOvQZPHDu6wL', 'index': 0}]\n",
"[{'name': None, 'args': '{\"a\"', 'id': None, 'index': 0}]\n",
"[{'name': None, 'args': ': 3, ', 'id': None, 'index': 0}]\n",
"[{'name': None, 'args': '\"b\": 1', 'id': None, 'index': 0}]\n",
"[{'name': None, 'args': '2}', 'id': None, 'index': 0}]\n",
"[{'name': 'Add', 'args': '', 'id': 'call_SQUoSsJz2p9Kx2x73GOgN1ja', 'index': 1}]\n",
"[{'name': None, 'args': '{\"a\"', 'id': None, 'index': 1}]\n",
"[{'name': None, 'args': ': 11,', 'id': None, 'index': 1}]\n",
"[{'name': None, 'args': ' \"b\": ', 'id': None, 'index': 1}]\n",
"[{'name': None, 'args': '49}', 'id': None, 'index': 1}]\n",
"[]\n"
]
}
],
"source": [
"async for chunk in llm_with_tools.astream(query):\n",
" print(chunk.tool_call_chunks)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Note that adding message chunks will merge their corresponding tool call chunks. This is the principle by which LangChain's various [tool output parsers](/docs/how_to/output_parser_structured) support streaming.\n",
"\n",
"For example, below we accumulate tool call chunks:"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[]\n",
"[{'name': 'Multiply', 'args': '', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}]\n",
"[{'name': 'Multiply', 'args': '{\"a\"', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}]\n",
"[{'name': 'Multiply', 'args': '{\"a\": 3, ', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}]\n",
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 1', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}]\n",
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}]\n",
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n",
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '{\"a\"', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n",
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '{\"a\": 11,', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n",
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '{\"a\": 11, \"b\": ', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n",
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '{\"a\": 11, \"b\": 49}', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n",
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '{\"a\": 11, \"b\": 49}', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n"
]
}
],
"source": [
"first = True\n",
"async for chunk in llm_with_tools.astream(query):\n",
" if first:\n",
" gathered = chunk\n",
" first = False\n",
" else:\n",
" gathered = gathered + chunk\n",
"\n",
" print(gathered.tool_call_chunks)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<class 'str'>\n"
]
}
],
"source": [
"print(type(gathered.tool_call_chunks[0][\"args\"]))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"And below we accumulate tool calls to demonstrate partial parsing:"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[]\n",
"[]\n",
"[{'name': 'Multiply', 'args': {}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}]\n",
"[{'name': 'Multiply', 'args': {'a': 3}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}]\n",
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 1}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}]\n",
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}]\n",
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}]\n",
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}, {'name': 'Add', 'args': {}, 'id': 'call_54Hx3DGjZitFlEjgMe1DYonh'}]\n",
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}, {'name': 'Add', 'args': {'a': 11}, 'id': 'call_54Hx3DGjZitFlEjgMe1DYonh'}]\n",
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}, {'name': 'Add', 'args': {'a': 11}, 'id': 'call_54Hx3DGjZitFlEjgMe1DYonh'}]\n",
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}, {'name': 'Add', 'args': {'a': 11, 'b': 49}, 'id': 'call_54Hx3DGjZitFlEjgMe1DYonh'}]\n",
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}, {'name': 'Add', 'args': {'a': 11, 'b': 49}, 'id': 'call_54Hx3DGjZitFlEjgMe1DYonh'}]\n"
]
}
],
"source": [
"first = True\n",
"async for chunk in llm_with_tools.astream(query):\n",
" if first:\n",
" gathered = chunk\n",
" first = False\n",
" else:\n",
" gathered = gathered + chunk\n",
"\n",
" print(gathered.tool_calls)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<class 'dict'>\n"
]
}
],
"source": [
"print(type(gathered.tool_calls[0][\"args\"]))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Passing tool outputs to the model\n",
"\n",
"If we're using the model-generated tool invocations to actually call tools and want to pass the tool results back to the model, we can do so using `ToolMessage`s."
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[HumanMessage(content='What is 3 * 12? Also, what is 11 + 49?'),\n",
" AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_svc2GLSxNFALbaCAbSjMI9J8', 'function': {'arguments': '{\"a\": 3, \"b\": 12}', 'name': 'Multiply'}, 'type': 'function'}, {'id': 'call_r8jxte3zW6h3MEGV3zH2qzFh', 'function': {'arguments': '{\"a\": 11, \"b\": 49}', 'name': 'Add'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 50, 'prompt_tokens': 105, 'total_tokens': 155}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': 'fp_d9767fc5b9', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-a79ad1dd-95f1-4a46-b688-4c83f327a7b3-0', tool_calls=[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_svc2GLSxNFALbaCAbSjMI9J8'}, {'name': 'Add', 'args': {'a': 11, 'b': 49}, 'id': 'call_r8jxte3zW6h3MEGV3zH2qzFh'}]),\n",
" ToolMessage(content='36', tool_call_id='call_svc2GLSxNFALbaCAbSjMI9J8'),\n",
" ToolMessage(content='60', tool_call_id='call_r8jxte3zW6h3MEGV3zH2qzFh')]"
]
},
"execution_count": 12,
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import HumanMessage, ToolMessage\n",
|
||||
"\n",
|
||||
"messages = [HumanMessage(query)]\n",
|
||||
"ai_msg = llm_with_tools.invoke(messages)\n",
|
||||
"messages.append(ai_msg)\n",
|
||||
"for tool_call in ai_msg.tool_calls:\n",
|
||||
" selected_tool = {\"add\": add, \"multiply\": multiply}[tool_call[\"name\"].lower()]\n",
|
||||
" tool_output = selected_tool.invoke(tool_call[\"args\"])\n",
|
||||
" messages.append(ToolMessage(tool_output, tool_call_id=tool_call[\"id\"]))\n",
|
||||
"messages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='3 * 12 is 36 and 11 + 49 is 60.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 171, 'total_tokens': 189}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': 'fp_d9767fc5b9', 'finish_reason': 'stop', 'logprobs': None}, id='run-20b52149-e00d-48ea-97cf-f8de7a255f8c-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_with_tools.invoke(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that we pass back the same `id` in the `ToolMessage` as the what we receive from the model in order to help the model match tool responses with tool calls.\n",
|
||||
"\n",
|
||||
"## Few-shot prompting\n",
|
||||
"\n",
|
||||
"For more complex tool use it's very useful to add few-shot examples to the prompt. We can do this by adding `AIMessage`s with `ToolCall`s and corresponding `ToolMessage`s to our prompt.\n",
|
||||
"\n",
|
||||
"For example, even with some special instructions our model can get tripped up by order of operations:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'Multiply',\n",
|
||||
" 'args': {'a': 119, 'b': 8},\n",
|
||||
" 'id': 'call_T88XN6ECucTgbXXkyDeC2CQj'},\n",
|
||||
" {'name': 'Add',\n",
|
||||
" 'args': {'a': 952, 'b': -20},\n",
|
||||
" 'id': 'call_licdlmGsRqzup8rhqJSb1yZ4'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_with_tools.invoke(\n",
|
||||
" \"Whats 119 times 8 minus 20. Don't do any math yourself, only use tools for math. Respect order of operations\"\n",
|
||||
").tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The model shouldn't be trying to add anything yet, since it technically can't know the results of 119 * 8 yet.\n",
|
||||
"\n",
|
||||
"By adding a prompt with some examples we can correct this behavior:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'Multiply',\n",
|
||||
" 'args': {'a': 119, 'b': 8},\n",
|
||||
" 'id': 'call_9MvuwQqg7dlJupJcoTWiEsDo'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import AIMessage\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"\n",
|
||||
"examples = [\n",
|
||||
" HumanMessage(\n",
|
||||
" \"What's the product of 317253 and 128472 plus four\", name=\"example_user\"\n",
|
||||
" ),\n",
|
||||
" AIMessage(\n",
|
||||
" \"\",\n",
|
||||
" name=\"example_assistant\",\n",
|
||||
" tool_calls=[\n",
|
||||
" {\"name\": \"Multiply\", \"args\": {\"x\": 317253, \"y\": 128472}, \"id\": \"1\"}\n",
|
||||
" ],\n",
|
||||
" ),\n",
|
||||
" ToolMessage(\"16505054784\", tool_call_id=\"1\"),\n",
|
||||
" AIMessage(\n",
|
||||
" \"\",\n",
|
||||
" name=\"example_assistant\",\n",
|
||||
" tool_calls=[{\"name\": \"Add\", \"args\": {\"x\": 16505054784, \"y\": 4}, \"id\": \"2\"}],\n",
|
||||
" ),\n",
|
||||
" ToolMessage(\"16505054788\", tool_call_id=\"2\"),\n",
|
||||
" AIMessage(\n",
|
||||
" \"The product of 317253 and 128472 plus four is 16505054788\",\n",
|
||||
" name=\"example_assistant\",\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"system = \"\"\"You are bad at math but are an expert at using a calculator. \n",
|
||||
"\n",
|
||||
"Use past tool usage as an example of how to correctly use the tools.\"\"\"\n",
|
||||
"few_shot_prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", system),\n",
|
||||
" *examples,\n",
|
||||
" (\"human\", \"{query}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = {\"query\": RunnablePassthrough()} | few_shot_prompt | llm_with_tools\n",
|
||||
"chain.invoke(\"Whats 119 times 8 minus 20\").tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And we get the correct output this time.\n",
|
||||
"\n",
|
||||
"Here's what the [LangSmith trace](https://smith.langchain.com/public/f70550a1-585f-4c9d-a643-13148ab1616f/r) looks like."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Binding model-specific formats (advanced)\n",
|
||||
"\n",
|
||||
"Providers adopt different conventions for formatting tool schemas. \n",
|
||||
"For instance, OpenAI uses a format like this:\n",
|
||||
"\n",
|
||||
"- `type`: The type of the tool. At the time of writing, this is always `\"function\"`.\n",
|
||||
"- `function`: An object containing tool parameters.\n",
|
||||
"- `function.name`: The name of the schema to output.\n",
|
||||
"- `function.description`: A high level description of the schema to output.\n",
|
||||
"- `function.parameters`: The nested details of the schema you want to extract, formatted as a [JSON schema](https://json-schema.org/) dict.\n",
|
||||
"\n",
|
||||
"We can bind this model-specific format directly to the model as well if preferred. Here's an example:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_mn4ELw1NbuE0DFYhIeK0GrPe', 'function': {'arguments': '{\"a\":119,\"b\":8}', 'name': 'multiply'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 17, 'prompt_tokens': 62, 'total_tokens': 79}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_c2295e73ad', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-353e8a9a-7125-4f94-8c68-4f3da4c21120-0', tool_calls=[{'name': 'multiply', 'args': {'a': 119, 'b': 8}, 'id': 'call_mn4ELw1NbuE0DFYhIeK0GrPe'}])"
|
||||
]
|
||||
},
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI()\n",
|
||||
"\n",
|
||||
"model_with_tools = model.bind(\n",
|
||||
" tools=[\n",
|
||||
" {\n",
|
||||
" \"type\": \"function\",\n",
|
||||
" \"function\": {\n",
|
||||
" \"name\": \"multiply\",\n",
|
||||
" \"description\": \"Multiply two integers together.\",\n",
|
||||
" \"parameters\": {\n",
|
||||
" \"type\": \"object\",\n",
|
||||
" \"properties\": {\n",
|
||||
" \"a\": {\"type\": \"number\", \"description\": \"First integer\"},\n",
|
||||
" \"b\": {\"type\": \"number\", \"description\": \"Second integer\"},\n",
|
||||
" },\n",
|
||||
" \"required\": [\"a\", \"b\"],\n",
|
||||
" },\n",
|
||||
" },\n",
|
||||
" }\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"model_with_tools.invoke(\"Whats 119 times 8?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This is functionally equivalent to the `bind_tools()` calls above."
|
||||
]
|
||||
},
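{
"cell_type": "markdown",
"metadata": {},
"source": [
"For reference, a minimal sketch of the equivalent `bind_tools()` form (assuming a `multiply` tool defined with the `@tool` decorator, as in the examples above):\n",
"\n",
"```python\n",
"from langchain_core.tools import tool\n",
"\n",
"\n",
"@tool\n",
"def multiply(a: int, b: int) -> int:\n",
"    \"\"\"Multiply two integers together.\"\"\"\n",
"    return a * b\n",
"\n",
"\n",
"model_with_tools = model.bind_tools([multiply])\n",
"```"
]
},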
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Next steps\n",
|
||||
"\n",
|
||||
"Now you've learned how to bind tool schemas to a chat model and to call those tools. Next, check out some more specific uses of tool calling:\n",
|
||||
"Now you've learned how to bind tool schemas to a chat model and to call those tools. Next, you can learn more about how to use tools:\n",
|
||||
"\n",
|
||||
"- Few shot promting [with tools](/docs/how_to/tools_few_shot/)\n",
|
||||
"- Stream [tool calls](/docs/how_to/tool_streaming/)\n",
|
||||
"- Bind [model-specific tools](/docs/how_to/tools_model_specific/)\n",
|
||||
"- Pass [runtime values to tools](/docs/how_to/tool_runtime)\n",
|
||||
"- Pass [tool results back to model](/docs/how_to/tool_results_pass_to_model)\n",
|
||||
"\n",
|
||||
"You can also check out some more specific uses of tool calling:\n",
|
||||
"\n",
|
||||
"- Building [tool-using chains and agents](/docs/how_to#tools)\n",
|
||||
"- Getting [structured outputs](/docs/how_to/structured_output/) from models"
|
||||
@@ -766,24 +298,10 @@
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
|
||||
108
docs/docs/how_to/tool_calling_parallel.ipynb
Normal file
@@ -0,0 +1,108 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Disabling parallel tool calling (OpenAI only)\n",
|
||||
"\n",
|
||||
"OpenAI tool calling performs tool calling in parallel by default. That means that if we ask a question like \"What is the weather in Tokyo, New York, and Chicago?\" and we have a tool for getting the weather, it will call the tool 3 times in parallel. We can force it to call only a single tool once by using the ``parallel_tool_call`` parameter."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"First let's set up our tools and model:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def add(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Adds a and b.\"\"\"\n",
|
||||
" return a + b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def multiply(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Multiplies a and b.\"\"\"\n",
|
||||
" return a * b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"tools = [add, multiply]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now let's show a quick example of how disabling parallel tool calls work:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'add',\n",
|
||||
" 'args': {'a': 2, 'b': 2},\n",
|
||||
" 'id': 'call_Hh4JOTCDM85Sm9Pr84VKrWu5'}]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_with_tools = llm.bind_tools(tools, parallel_tool_calls=False)\n",
|
||||
"llm_with_tools.invoke(\"Please call the first tool two times\").tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"As we can see, even though we explicitly told the model to call a tool twice, by disabling parallel tool calls the model was constrained to only calling one."
|
||||
]
|
||||
},
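{
"cell_type": "markdown",
"metadata": {},
"source": [
"For contrast, a rough sketch of the default behavior (parallel tool calling enabled; the exact calls will vary from run to run):\n",
"\n",
"```python\n",
"llm_with_parallel = llm.bind_tools(tools)  # parallel tool calling is on by default\n",
"llm_with_parallel.invoke(\"Please call the first tool two times\").tool_calls\n",
"# typically returns two separate `add` calls\n",
"```"
]
},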
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
126
docs/docs/how_to/tool_choice.ipynb
Normal file
@@ -0,0 +1,126 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to force tool calling behavior\n",
|
||||
"\n",
|
||||
"In order to force our LLM to spelect a specific tool, we can use the `tool_choice` parameter to ensure certain behavior. First, let's define our model and tools:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def add(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Adds a and b.\"\"\"\n",
|
||||
" return a + b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def multiply(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Multiplies a and b.\"\"\"\n",
|
||||
" return a * b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"tools = [add, multiply]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"%pip install -qU langchain langchain_openai\n",
|
||||
"\n",
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For example, we can force our tool to call the multiply tool by using the following code:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_9cViskmLvPnHjXk9tbVla5HA', 'function': {'arguments': '{\"a\":2,\"b\":4}', 'name': 'Multiply'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 103, 'total_tokens': 112}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-095b827e-2bdd-43bb-8897-c843f4504883-0', tool_calls=[{'name': 'Multiply', 'args': {'a': 2, 'b': 4}, 'id': 'call_9cViskmLvPnHjXk9tbVla5HA'}], usage_metadata={'input_tokens': 103, 'output_tokens': 9, 'total_tokens': 112})"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_forced_to_multiply = llm.bind_tools(tools, tool_choice=\"Multiply\")\n",
|
||||
"llm_forced_to_multiply.invoke(\"what is 2 + 4\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Even if we pass it something that doesn't require multiplcation - it will still call the tool!"
|
||||
]
|
||||
},
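{
"cell_type": "markdown",
"metadata": {},
"source": [
"For example, a minimal sketch (the arguments the model invents for an off-topic input will vary):\n",
"\n",
"```python\n",
"llm_forced_to_multiply.invoke(\"Tell me a joke\").tool_calls\n",
"# still returns a single Multiply call, with made-up arguments\n",
"```"
]
},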
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also just force our tool to select at least one of our tools by passing in the \"any\" (or \"required\" which is OpenAI specific) keyword to the `tool_choice` parameter."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_mCSiJntCwHJUBfaHZVUB2D8W', 'function': {'arguments': '{\"a\":1,\"b\":2}', 'name': 'Add'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 15, 'prompt_tokens': 94, 'total_tokens': 109}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-28f75260-9900-4bed-8cd3-f1579abb65e5-0', tool_calls=[{'name': 'Add', 'args': {'a': 1, 'b': 2}, 'id': 'call_mCSiJntCwHJUBfaHZVUB2D8W'}], usage_metadata={'input_tokens': 94, 'output_tokens': 15, 'total_tokens': 109})"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_forced_to_use_tool = llm.bind_tools(tools, tool_choice=\"any\")\n",
|
||||
"llm_forced_to_use_tool.invoke(\"What day is today?\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
127
docs/docs/how_to/tool_results_pass_to_model.ipynb
Normal file
@@ -0,0 +1,127 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to pass tool outputs to the model\n",
|
||||
"\n",
|
||||
"If we're using the model-generated tool invocations to actually call tools and want to pass the tool results back to the model, we can do so using `ToolMessage`s. First, let's define our tools and our model."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def add(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Adds a and b.\"\"\"\n",
|
||||
" return a + b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def multiply(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Multiplies a and b.\"\"\"\n",
|
||||
" return a * b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"tools = [add, multiply]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
|
||||
"llm_with_tools = llm.bind_tools(tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we can use ``ToolMessage`` to pass back the output of the tool calls to the model."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content='What is 3 * 12? Also, what is 11 + 49?'),\n",
|
||||
" AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_svc2GLSxNFALbaCAbSjMI9J8', 'function': {'arguments': '{\"a\": 3, \"b\": 12}', 'name': 'Multiply'}, 'type': 'function'}, {'id': 'call_r8jxte3zW6h3MEGV3zH2qzFh', 'function': {'arguments': '{\"a\": 11, \"b\": 49}', 'name': 'Add'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 50, 'prompt_tokens': 105, 'total_tokens': 155}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': 'fp_d9767fc5b9', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-a79ad1dd-95f1-4a46-b688-4c83f327a7b3-0', tool_calls=[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_svc2GLSxNFALbaCAbSjMI9J8'}, {'name': 'Add', 'args': {'a': 11, 'b': 49}, 'id': 'call_r8jxte3zW6h3MEGV3zH2qzFh'}]),\n",
|
||||
" ToolMessage(content='36', tool_call_id='call_svc2GLSxNFALbaCAbSjMI9J8'),\n",
|
||||
" ToolMessage(content='60', tool_call_id='call_r8jxte3zW6h3MEGV3zH2qzFh')]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import HumanMessage, ToolMessage\n",
|
||||
"\n",
|
||||
"query = \"What is 3 * 12? Also, what is 11 + 49?\"\n",
|
||||
"\n",
|
||||
"messages = [HumanMessage(query)]\n",
|
||||
"ai_msg = llm_with_tools.invoke(messages)\n",
|
||||
"messages.append(ai_msg)\n",
|
||||
"for tool_call in ai_msg.tool_calls:\n",
|
||||
" selected_tool = {\"add\": add, \"multiply\": multiply}[tool_call[\"name\"].lower()]\n",
|
||||
" tool_output = selected_tool.invoke(tool_call[\"args\"])\n",
|
||||
" messages.append(ToolMessage(tool_output, tool_call_id=tool_call[\"id\"]))\n",
|
||||
"messages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='3 * 12 is 36 and 11 + 49 is 60.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 171, 'total_tokens': 189}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': 'fp_d9767fc5b9', 'finish_reason': 'stop', 'logprobs': None}, id='run-20b52149-e00d-48ea-97cf-f8de7a255f8c-0')"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_with_tools.invoke(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that we pass back the same `id` in the `ToolMessage` as the what we receive from the model in order to help the model match tool responses with tool calls."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -12,7 +12,7 @@
|
||||
"- [Chat models](/docs/concepts/#chat-models)\n",
|
||||
"- [LangChain Tools](/docs/concepts/#tools)\n",
|
||||
"- [How to create tools](/docs/how_to/custom_tools)\n",
|
||||
"- [How to use a model to call tools](https://python.langchain.com/v0.2/docs/how_to/tool_calling/)\n",
|
||||
"- [How to use a model to call tools](https://python.langchain.com/v0.2/docs/how_to/tool_calling)\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
":::{.callout-info} Supported models\n",
|
||||
@@ -227,7 +227,7 @@
|
||||
"\n",
|
||||
"Chat models only output requests to invoke tools, they don't actually invoke the underlying tools.\n",
|
||||
"\n",
|
||||
"To see how to invoke the tools, please refer to [how to use a model to call tools](https://python.langchain.com/v0.2/docs/how_to/tool_calling/).\n",
|
||||
"To see how to invoke the tools, please refer to [how to use a model to call tools](https://python.langchain.com/v0.2/docs/how_to/tool_calling).\n",
|
||||
":::"
|
||||
]
|
||||
}
|
||||
|
||||
235
docs/docs/how_to/tool_streaming.ipynb
Normal file
@@ -0,0 +1,235 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to stream tool calls\n",
|
||||
"\n",
|
||||
"When tools are called in a streaming context, \n",
|
||||
"[message chunks](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html#langchain_core.messages.ai.AIMessageChunk) \n",
|
||||
"will be populated with [tool call chunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.tool.ToolCallChunk.html#langchain_core.messages.tool.ToolCallChunk) \n",
|
||||
"objects in a list via the `.tool_call_chunks` attribute. A `ToolCallChunk` includes \n",
|
||||
"optional string fields for the tool `name`, `args`, and `id`, and includes an optional \n",
|
||||
"integer field `index` that can be used to join chunks together. Fields are optional \n",
|
||||
"because portions of a tool call may be streamed across different chunks (e.g., a chunk \n",
|
||||
"that includes a substring of the arguments may have null values for the tool name and id).\n",
|
||||
"\n",
|
||||
"Because message chunks inherit from their parent message class, an \n",
|
||||
"[AIMessageChunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html#langchain_core.messages.ai.AIMessageChunk) \n",
|
||||
"with tool call chunks will also include `.tool_calls` and `.invalid_tool_calls` fields. \n",
|
||||
"These fields are parsed best-effort from the message's tool call chunks.\n",
|
||||
"\n",
|
||||
"Note that not all providers currently support streaming for tool calls. Before we start let's define our tools and our model."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def add(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Adds a and b.\"\"\"\n",
|
||||
" return a + b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def multiply(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Multiplies a and b.\"\"\"\n",
|
||||
" return a * b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"tools = [add, multiply]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
|
||||
"llm_with_tools = llm.bind_tools(tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now let's define our query and stream our output:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[]\n",
|
||||
"[{'name': 'Multiply', 'args': '', 'id': 'call_3aQwTP9CYlFxwOvQZPHDu6wL', 'index': 0}]\n",
|
||||
"[{'name': None, 'args': '{\"a\"', 'id': None, 'index': 0}]\n",
|
||||
"[{'name': None, 'args': ': 3, ', 'id': None, 'index': 0}]\n",
|
||||
"[{'name': None, 'args': '\"b\": 1', 'id': None, 'index': 0}]\n",
|
||||
"[{'name': None, 'args': '2}', 'id': None, 'index': 0}]\n",
|
||||
"[{'name': 'Add', 'args': '', 'id': 'call_SQUoSsJz2p9Kx2x73GOgN1ja', 'index': 1}]\n",
|
||||
"[{'name': None, 'args': '{\"a\"', 'id': None, 'index': 1}]\n",
|
||||
"[{'name': None, 'args': ': 11,', 'id': None, 'index': 1}]\n",
|
||||
"[{'name': None, 'args': ' \"b\": ', 'id': None, 'index': 1}]\n",
|
||||
"[{'name': None, 'args': '49}', 'id': None, 'index': 1}]\n",
|
||||
"[]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = \"What is 3 * 12? Also, what is 11 + 49?\"\n",
|
||||
"\n",
|
||||
"async for chunk in llm_with_tools.astream(query):\n",
|
||||
" print(chunk.tool_call_chunks)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that adding message chunks will merge their corresponding tool call chunks. This is the principle by which LangChain's various [tool output parsers](/docs/how_to/output_parser_structured) support streaming.\n",
|
||||
"\n",
|
||||
"For example, below we accumulate tool call chunks:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[]\n",
|
||||
"[{'name': 'Multiply', 'args': '', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\"', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, ', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 1', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '{\"a\"', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '{\"a\": 11,', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '{\"a\": 11, \"b\": ', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '{\"a\": 11, \"b\": 49}', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '{\"a\": 11, \"b\": 49}', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"first = True\n",
|
||||
"async for chunk in llm_with_tools.astream(query):\n",
|
||||
" if first:\n",
|
||||
" gathered = chunk\n",
|
||||
" first = False\n",
|
||||
" else:\n",
|
||||
" gathered = gathered + chunk\n",
|
||||
"\n",
|
||||
" print(gathered.tool_call_chunks)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"<class 'str'>\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(type(gathered.tool_call_chunks[0][\"args\"]))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And below we accumulate tool calls to demonstrate partial parsing:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[]\n",
|
||||
"[]\n",
|
||||
"[{'name': 'Multiply', 'args': {}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 1}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}, {'name': 'Add', 'args': {}, 'id': 'call_54Hx3DGjZitFlEjgMe1DYonh'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}, {'name': 'Add', 'args': {'a': 11}, 'id': 'call_54Hx3DGjZitFlEjgMe1DYonh'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}, {'name': 'Add', 'args': {'a': 11}, 'id': 'call_54Hx3DGjZitFlEjgMe1DYonh'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}, {'name': 'Add', 'args': {'a': 11, 'b': 49}, 'id': 'call_54Hx3DGjZitFlEjgMe1DYonh'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}, {'name': 'Add', 'args': {'a': 11, 'b': 49}, 'id': 'call_54Hx3DGjZitFlEjgMe1DYonh'}]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"first = True\n",
|
||||
"async for chunk in llm_with_tools.astream(query):\n",
|
||||
" if first:\n",
|
||||
" gathered = chunk\n",
|
||||
" first = False\n",
|
||||
" else:\n",
|
||||
" gathered = gathered + chunk\n",
|
||||
"\n",
|
||||
" print(gathered.tool_calls)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"<class 'dict'>\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(type(gathered.tool_calls[0][\"args\"]))"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
175
docs/docs/how_to/tools_few_shot.ipynb
Normal file
@@ -0,0 +1,175 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to use few-shot prompting with tool calling\n",
|
||||
"\n",
|
||||
"For more complex tool use it's very useful to add few-shot examples to the prompt. We can do this by adding `AIMessage`s with `ToolCall`s and corresponding `ToolMessage`s to our prompt.\n",
|
||||
"\n",
|
||||
"First let's define our tools and model."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def add(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Adds a and b.\"\"\"\n",
|
||||
" return a + b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def multiply(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Multiplies a and b.\"\"\"\n",
|
||||
" return a * b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"tools = [add, multiply]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
|
||||
"llm_with_tools = llm.bind_tools(tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's run our model where we can notice that even with some special instructions our model can get tripped up by order of operations. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'Multiply',\n",
|
||||
" 'args': {'a': 119, 'b': 8},\n",
|
||||
" 'id': 'call_T88XN6ECucTgbXXkyDeC2CQj'},\n",
|
||||
" {'name': 'Add',\n",
|
||||
" 'args': {'a': 952, 'b': -20},\n",
|
||||
" 'id': 'call_licdlmGsRqzup8rhqJSb1yZ4'}]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_with_tools.invoke(\n",
|
||||
" \"Whats 119 times 8 minus 20. Don't do any math yourself, only use tools for math. Respect order of operations\"\n",
|
||||
").tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The model shouldn't be trying to add anything yet, since it technically can't know the results of 119 * 8 yet.\n",
|
||||
"\n",
|
||||
"By adding a prompt with some examples we can correct this behavior:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'Multiply',\n",
|
||||
" 'args': {'a': 119, 'b': 8},\n",
|
||||
" 'id': 'call_9MvuwQqg7dlJupJcoTWiEsDo'}]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import AIMessage, HumanMessage, ToolMessage\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"\n",
|
||||
"examples = [\n",
|
||||
" HumanMessage(\n",
|
||||
" \"What's the product of 317253 and 128472 plus four\", name=\"example_user\"\n",
|
||||
" ),\n",
|
||||
" AIMessage(\n",
|
||||
" \"\",\n",
|
||||
" name=\"example_assistant\",\n",
|
||||
" tool_calls=[\n",
|
||||
" {\"name\": \"Multiply\", \"args\": {\"x\": 317253, \"y\": 128472}, \"id\": \"1\"}\n",
|
||||
" ],\n",
|
||||
" ),\n",
|
||||
" ToolMessage(\"16505054784\", tool_call_id=\"1\"),\n",
|
||||
" AIMessage(\n",
|
||||
" \"\",\n",
|
||||
" name=\"example_assistant\",\n",
|
||||
" tool_calls=[{\"name\": \"Add\", \"args\": {\"x\": 16505054784, \"y\": 4}, \"id\": \"2\"}],\n",
|
||||
" ),\n",
|
||||
" ToolMessage(\"16505054788\", tool_call_id=\"2\"),\n",
|
||||
" AIMessage(\n",
|
||||
" \"The product of 317253 and 128472 plus four is 16505054788\",\n",
|
||||
" name=\"example_assistant\",\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"system = \"\"\"You are bad at math but are an expert at using a calculator. \n",
|
||||
"\n",
|
||||
"Use past tool usage as an example of how to correctly use the tools.\"\"\"\n",
|
||||
"few_shot_prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", system),\n",
|
||||
" *examples,\n",
|
||||
" (\"human\", \"{query}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = {\"query\": RunnablePassthrough()} | few_shot_prompt | llm_with_tools\n",
|
||||
"chain.invoke(\"Whats 119 times 8 minus 20\").tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And we get the correct output this time.\n",
|
||||
"\n",
|
||||
"Here's what the [LangSmith trace](https://smith.langchain.com/public/f70550a1-585f-4c9d-a643-13148ab1616f/r) looks like."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
79
docs/docs/how_to/tools_model_specific.ipynb
Normal file
@@ -0,0 +1,79 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to bind model-specific tools\n",
|
||||
"\n",
|
||||
"Providers adopt different conventions for formatting tool schemas. \n",
|
||||
"For instance, OpenAI uses a format like this:\n",
|
||||
"\n",
|
||||
"- `type`: The type of the tool. At the time of writing, this is always `\"function\"`.\n",
|
||||
"- `function`: An object containing tool parameters.\n",
|
||||
"- `function.name`: The name of the schema to output.\n",
|
||||
"- `function.description`: A high level description of the schema to output.\n",
|
||||
"- `function.parameters`: The nested details of the schema you want to extract, formatted as a [JSON schema](https://json-schema.org/) dict.\n",
|
||||
"\n",
|
||||
"We can bind this model-specific format directly to the model as well if preferred. Here's an example:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_mn4ELw1NbuE0DFYhIeK0GrPe', 'function': {'arguments': '{\"a\":119,\"b\":8}', 'name': 'multiply'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 17, 'prompt_tokens': 62, 'total_tokens': 79}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_c2295e73ad', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-353e8a9a-7125-4f94-8c68-4f3da4c21120-0', tool_calls=[{'name': 'multiply', 'args': {'a': 119, 'b': 8}, 'id': 'call_mn4ELw1NbuE0DFYhIeK0GrPe'}])"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI()\n",
|
||||
"\n",
|
||||
"model_with_tools = model.bind(\n",
|
||||
" tools=[\n",
|
||||
" {\n",
|
||||
" \"type\": \"function\",\n",
|
||||
" \"function\": {\n",
|
||||
" \"name\": \"multiply\",\n",
|
||||
" \"description\": \"Multiply two integers together.\",\n",
|
||||
" \"parameters\": {\n",
|
||||
" \"type\": \"object\",\n",
|
||||
" \"properties\": {\n",
|
||||
" \"a\": {\"type\": \"number\", \"description\": \"First integer\"},\n",
|
||||
" \"b\": {\"type\": \"number\", \"description\": \"Second integer\"},\n",
|
||||
" },\n",
|
||||
" \"required\": [\"a\", \"b\"],\n",
|
||||
" },\n",
|
||||
" },\n",
|
||||
" }\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"model_with_tools.invoke(\"Whats 119 times 8?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This is functionally equivalent to the `bind_tools()` method."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -19,7 +19,7 @@
|
||||
"\n",
|
||||
":::{.callout-caution}\n",
|
||||
"\n",
|
||||
"Some models have been fine-tuned for tool calling and provide a dedicated API for tool calling. Generally, such models are better at tool calling than non-fine-tuned models, and are recommended for use cases that require tool calling. Please see the [how to use a chat model to call tools](/docs/how_to/tool_calling/) guide for more information.\n",
|
||||
"Some models have been fine-tuned for tool calling and provide a dedicated API for tool calling. Generally, such models are better at tool calling than non-fine-tuned models, and are recommended for use cases that require tool calling. Please see the [how to use a chat model to call tools](/docs/how_to/tool_calling) guide for more information.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
@@ -34,7 +34,7 @@
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"In this guide, we'll see how to add **ad-hoc** tool calling support to a chat model. This is an alternative method to invoke tools if you're using a model that does not natively support [tool calling](/docs/how_to/tool_calling/).\n",
|
||||
"In this guide, we'll see how to add **ad-hoc** tool calling support to a chat model. This is an alternative method to invoke tools if you're using a model that does not natively support [tool calling](/docs/how_to/tool_calling).\n",
|
||||
"\n",
|
||||
"We'll do this by simply writing a prompt that will get the model to invoke the appropriate tools. Here's a diagram of the logic:\n",
|
||||
"\n",
|
||||
@@ -87,7 +87,7 @@
|
||||
"id": "7ec6409b-21e5-4d0a-8a46-c4ef0b055dd3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can select any of the given models for this how-to guide. Keep in mind that most of these models already [support native tool calling](/docs/integrations/chat/), so using the prompting strategy shown here doesn't make sense for these models, and instead you should follow the [how to use a chat model to call tools](/docs/how_to/tool_calling/) guide.\n",
|
||||
"You can select any of the given models for this how-to guide. Keep in mind that most of these models already [support native tool calling](/docs/integrations/chat/), so using the prompting strategy shown here doesn't make sense for these models, and instead you should follow the [how to use a chat model to call tools](/docs/how_to/tool_calling) guide.\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
|
||||
|
||||
@@ -7,6 +7,19 @@
|
||||
"source": [
|
||||
"# How to trim messages\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"\n",
|
||||
"- [Messages](/docs/concepts/#messages)\n",
|
||||
"- [Chat models](/docs/concepts/#chat-models)\n",
|
||||
"- [Chaining](/docs/how_to/sequence/)\n",
|
||||
"- [Chat history](/docs/concepts/#chat-history)\n",
|
||||
"\n",
|
||||
"The methods in this guide also require `langchain-core>=0.2.9`.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"All models have finite context windows, meaning there's a limit to how many tokens they can take as input. If you have very long messages or a chain/agent that accumulates a long message is history, you'll need to manage the length of the messages you're passing in to the model.\n",
|
||||
"\n",
|
||||
"The `trim_messages` util provides some basic strategies for trimming a list of messages to be of a certain token length.\n",
|
||||
@@ -310,7 +323,7 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='A \"polygon\"! Because it\\'s a \"poly-gone\" silent!', response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 32, 'total_tokens': 46}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_319be4768e', 'finish_reason': 'stop', 'logprobs': None}, id='run-64cc4575-14d1-4f3f-b4af-97f24758f703-0', usage_metadata={'input_tokens': 32, 'output_tokens': 14, 'total_tokens': 46})"
|
||||
"AIMessage(content='A: A \"Polly-gone\"!', response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 32, 'total_tokens': 41}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_66b29dffce', 'finish_reason': 'stop', 'logprobs': None}, id='run-83e96ddf-bcaa-4f63-824c-98b0f8a0d474-0', usage_metadata={'input_tokens': 32, 'output_tokens': 9, 'total_tokens': 41})"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
@@ -378,24 +391,17 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"execution_count": 9,
|
||||
"id": "a9517858-fc2f-4dc3-898d-bf98a0e905a0",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run c87e2f1b-81ad-4fa7-bfd9-ce6edb29a482 not found for run 7892ee8f-0669-4d6b-a2ca-ef8aae81042a. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"A polygon! Because it's a parrot gone quiet!\", response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 32, 'total_tokens': 43}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_319be4768e', 'finish_reason': 'stop', 'logprobs': None}, id='run-72dad96e-8b58-45f4-8c08-21f9f1a6b68f-0', usage_metadata={'input_tokens': 32, 'output_tokens': 11, 'total_tokens': 43})"
|
||||
"AIMessage(content='A \"polly-no-wanna-cracker\"!', response_metadata={'token_usage': {'completion_tokens': 10, 'prompt_tokens': 32, 'total_tokens': 42}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_5bf7397cd3', 'finish_reason': 'stop', 'logprobs': None}, id='run-054dd309-3497-4e7b-b22a-c1859f11d32e-0', usage_metadata={'input_tokens': 32, 'output_tokens': 10, 'total_tokens': 42})"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -409,7 +415,7 @@
|
||||
"\n",
|
||||
"def dummy_get_session_history(session_id):\n",
|
||||
" if session_id != \"1\":\n",
|
||||
" raise InMemoryChatMessageHistory()\n",
|
||||
" return InMemoryChatMessageHistory()\n",
|
||||
" return chat_history\n",
|
||||
"\n",
|
||||
"\n",
|
||||
@@ -451,9 +457,9 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv-2",
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "poetry-venv-2"
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
@@ -465,7 +471,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -36,7 +36,7 @@
|
||||
"| [ChatAnthropic](https://api.python.langchain.com/en/latest/chat_models/langchain_anthropic.chat_models.ChatAnthropic.html) | [langchain-anthropic](https://api.python.langchain.com/en/latest/anthropic_api_reference.html) | ❌ | beta | ✅ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling/) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | ❌ | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | ❌ | \n",
|
||||
"\n",
|
||||
@@ -51,7 +51,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 1,
|
||||
"id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -59,7 +59,7 @@
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"anthropic_API_KEY\"] = getpass.getpass(\"Enter your Anthropic API key: \")"
|
||||
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass(\"Enter your Anthropic API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -72,7 +72,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 2,
|
||||
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -113,7 +113,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 4,
|
||||
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -121,7 +121,7 @@
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"\n",
|
||||
"llm = ChatAnthropic(\n",
|
||||
" model=\"claude-3-sonnet-20240229\",\n",
|
||||
" model=\"claude-3-5-sonnet-20240620\",\n",
|
||||
" temperature=0,\n",
|
||||
" max_tokens=1024,\n",
|
||||
" timeout=None,\n",
|
||||
@@ -140,7 +140,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 5,
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -149,10 +149,10 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"Voici la traduction en français :\\n\\nJ'aime la programmation.\", response_metadata={'id': 'msg_013qztabaFADNnKsHR1rdrju', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 29, 'output_tokens': 21}}, id='run-a22ab30c-7e09-48f5-bc27-a08a9d8f7fa1-0', usage_metadata={'input_tokens': 29, 'output_tokens': 21, 'total_tokens': 50})"
|
||||
"AIMessage(content=\"J'adore la programmation.\", response_metadata={'id': 'msg_018Nnu76krRPq8HvgKLW4F8T', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 29, 'output_tokens': 11}}, id='run-57e9295f-db8a-48dc-9619-babd2bedd891-0', usage_metadata={'input_tokens': 29, 'output_tokens': 11, 'total_tokens': 40})"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -171,7 +171,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 6,
|
||||
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -179,9 +179,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Voici la traduction en français :\n",
|
||||
"\n",
|
||||
"J'aime la programmation.\n"
|
||||
"J'adore la programmation.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -201,17 +199,17 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 7,
|
||||
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Ich liebe Programmieren.', response_metadata={'id': 'msg_01FWrA8w9HbjqYPTQ7VryUnp', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 23, 'output_tokens': 11}}, id='run-b749bf20-b46d-4d62-ac73-f59adab6dd7e-0', usage_metadata={'input_tokens': 23, 'output_tokens': 11, 'total_tokens': 34})"
|
||||
"AIMessage(content=\"Here's the German translation:\\n\\nIch liebe Programmieren.\", response_metadata={'id': 'msg_01GhkRtQZUkA5Ge9hqmD8HGY', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 23, 'output_tokens': 18}}, id='run-da5906b4-b200-4e08-b81a-64d4453643b6-0', usage_metadata={'input_tokens': 23, 'output_tokens': 18, 'total_tokens': 41})"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -251,22 +249,26 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": 8,
|
||||
"id": "4a374a24-2534-4e6f-825b-30fab7bbe0cb",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'text': \"Okay, let's use the GetWeather tool to check the current temperatures in Los Angeles and New York City.\",\n",
|
||||
"[{'text': \"To answer this question, we'll need to check the current weather in both Los Angeles (LA) and New York (NY). I'll use the GetWeather function to retrieve this information for both cities.\",\n",
|
||||
" 'type': 'text'},\n",
|
||||
" {'id': 'toolu_01Tnp5tL7LJZaVyQXKEjbqcC',\n",
|
||||
" {'id': 'toolu_01Ddzj5PkuZkrjF4tafzu54A',\n",
|
||||
" 'input': {'location': 'Los Angeles, CA'},\n",
|
||||
" 'name': 'GetWeather',\n",
|
||||
" 'type': 'tool_use'},\n",
|
||||
" {'id': 'toolu_012kz4qHZQqD4qg8sFPeKqpP',\n",
|
||||
" 'input': {'location': 'New York, NY'},\n",
|
||||
" 'name': 'GetWeather',\n",
|
||||
" 'type': 'tool_use'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -288,7 +290,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"execution_count": 9,
|
||||
"id": "6b4a1ead-952c-489f-a8d4-355d3fb55f3f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -297,10 +299,13 @@
|
||||
"text/plain": [
|
||||
"[{'name': 'GetWeather',\n",
|
||||
" 'args': {'location': 'Los Angeles, CA'},\n",
|
||||
" 'id': 'toolu_01Tnp5tL7LJZaVyQXKEjbqcC'}]"
|
||||
" 'id': 'toolu_01Ddzj5PkuZkrjF4tafzu54A'},\n",
|
||||
" {'name': 'GetWeather',\n",
|
||||
" 'args': {'location': 'New York, NY'},\n",
|
||||
" 'id': 'toolu_012kz4qHZQqD4qg8sFPeKqpP'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -336,7 +341,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.10.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -35,7 +35,7 @@
|
||||
"| [ChatVertexAI](https://api.python.langchain.com/en/latest/chat_models/langchain_google_vertexai.chat_models.ChatVertexAI.html) | [langchain-google-vertexai](https://api.python.langchain.com/en/latest/google_vertexai_api_reference.html) | ❌ | beta | ✅ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling/) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | \n",
|
||||
"\n",
|
||||
|
||||
@@ -91,7 +91,7 @@
|
||||
"\n",
|
||||
"## Tool calling\n",
|
||||
"\n",
|
||||
"Groq chat models support [tool calling](/docs/how_to/tool_calling/) to generate output matching a specific schema. The model may choose to call multiple tools or the same tool multiple times if appropriate.\n",
|
||||
"Groq chat models support [tool calling](/docs/how_to/tool_calling) to generate output matching a specific schema. The model may choose to call multiple tools or the same tool multiple times if appropriate.\n",
|
||||
"\n",
|
||||
"Here's an example:"
|
||||
]
|
||||
|
||||
@@ -21,7 +21,7 @@
|
||||
"| [ChatLlamaCpp](https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.llamacpp.ChatLlamaCpp.html) | [langchain-community](https://api.python.langchain.com/en/latest/community_api_reference.html) | ✅ | ❌ | ❌ |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling/) | [Structured output](/docs/how_to/structured_output/) | JSON mode | Image input | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | Image input | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ✅ | \n",
|
||||
"\n",
|
||||
|
||||
190
docs/docs/integrations/chat/oci_generative_ai.ipynb
Normal file
190
docs/docs/integrations/chat/oci_generative_ai.ipynb
Normal file
@@ -0,0 +1,190 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: OCIGenAI\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e49f1e0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ChatOCIGenAI\n",
|
||||
"\n",
|
||||
"This notebook provides a quick overview for getting started with OCIGenAI [chat models](/docs/concepts/#chat-models). For detailed documentation of all ChatOCIGenAI features and configurations head to the [API reference](https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.oci_generative_ai.ChatOCIGenAI.html).\n",
|
||||
"\n",
|
||||
"Oracle Cloud Infrastructure (OCI) Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases, and which is available through a single API.\n",
|
||||
"Using the OCI Generative AI service you can access ready-to-use pretrained models, or create and host your own fine-tuned custom models based on your own data on dedicated AI clusters. Detailed documentation of the service and API is available __[here](https://docs.oracle.com/en-us/iaas/Content/generative-ai/home.htm)__ and __[here](https://docs.oracle.com/en-us/iaas/api/#/en/generative-ai/20231130/)__.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/v0.2/docs/integrations/chat/oci_generative_ai) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ChatOCIGenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.oci_generative_ai.ChatOCIGenAI.html) | [langchain-community](https://api.python.langchain.com/en/latest/community_api_reference.html) | ❌ | ❌ | ❌ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling/) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | ✅ | ❌ | \n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access OCIGenAI models you'll need to install the `oci` and `langchain-community` packages.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"The credentials and authentication methods supported for this integration are equivalent to those used with other OCI services and follow the __[standard SDK authentication](https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdk_authentication_methods.htm)__ methods, specifically API Key, session token, instance principal, and resource principal.\n",
|
||||
"\n",
|
||||
"API key is the default authentication method used in the examples above. The following example demonstrates how to use a different authentication method (session token)"
|
||||
]
|
||||
},
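Since the cell above promises a session-token example, here is a minimal sketch of what that could look like for the chat model, mirroring the `auth_type`/`auth_profile` parameters shown in the companion OCIGenAI LLM notebook (the OCID and profile values are placeholders):

```python
from langchain_community.chat_models.oci_generative_ai import ChatOCIGenAI

# Sketch only: authenticate with a session token instead of the default API key.
# MY_OCID and MY_PROFILE are placeholders for your compartment OCID and CLI profile.
chat = ChatOCIGenAI(
    model_id="cohere.command-r-16k",
    service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
    compartment_id="MY_OCID",
    auth_type="SECURITY_TOKEN",
    auth_profile="MY_PROFILE",  # replace with your profile name
)
```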
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain OCIGenAI integration lives in the `langchain-community` package and you will also need to install the `oci` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-community oci"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.chat_models.oci_generative_ai import ChatOCIGenAI\n",
|
||||
"from langchain_core.messages import AIMessage, HumanMessage, SystemMessage\n",
|
||||
"\n",
|
||||
"chat = ChatOCIGenAI(\n",
|
||||
" model_id=\"cohere.command-r-16k\",\n",
|
||||
" service_endpoint=\"https://inference.generativeai.us-chicago-1.oci.oraclecloud.com\",\n",
|
||||
" compartment_id=\"MY_OCID\",\n",
|
||||
" model_kwargs={\"temperature\": 0.7, \"max_tokens\": 500},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b4f3e15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" SystemMessage(content=\"your are an AI assistant.\"),\n",
|
||||
" AIMessage(content=\"Hi there human!\"),\n",
|
||||
" HumanMessage(content=\"tell me a joke.\"),\n",
|
||||
"]\n",
|
||||
"response = chat.invoke(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(response.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_template(\"Tell me a joke about {topic}\")\n",
|
||||
"chain = prompt | chat\n",
|
||||
"\n",
|
||||
"response = chain.invoke({\"topic\": \"dogs\"})\n",
|
||||
"print(response.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all ChatOCIGenAI features and configurations head to the API reference: https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.oci_generative_ai.ChatOCIGenAI.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -41,7 +41,7 @@
|
||||
"| [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) | [langchain-openai](https://api.python.langchain.com/en/latest/openai_api_reference.html) | ❌ | beta | ✅ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling/) | [Structured output](/docs/how_to/structured_output/) | JSON mode | Image input | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | Image input | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ | \n",
|
||||
"\n",
|
||||
|
||||
@@ -12,6 +12,19 @@
|
||||
"This covers how to load `Microsoft PowerPoint` documents into a document format that we can use downstream."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "aef1500f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Install packages\n",
|
||||
"%pip install unstructured\n",
|
||||
"%pip install python-magic\n",
|
||||
"%pip install python-pptx"
|
||||
]
|
||||
},
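With the packages installed, a minimal load is straightforward; `UnstructuredPowerPointLoader` is the community loader for this format, and the file path below is a placeholder:

```python
from langchain_community.document_loaders import UnstructuredPowerPointLoader

# "example.pptx" is a placeholder path to a local PowerPoint file
loader = UnstructuredPowerPointLoader("example.pptx")
docs = loader.load()
print(docs[0].page_content[:100])
```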
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
|
||||
@@ -52,67 +52,6 @@
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Using in a conversation chain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import ConversationChain\n",
|
||||
"from langchain.memory import ConversationBufferMemory\n",
|
||||
"\n",
|
||||
"conversation = ConversationChain(\n",
|
||||
" llm=llm, verbose=True, memory=ConversationBufferMemory()\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"conversation.predict(input=\"Hi there!\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Conversation Chain With Streaming"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.llms import Bedrock\n",
|
||||
"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
|
||||
"\n",
|
||||
"llm = Bedrock(\n",
|
||||
" credentials_profile_name=\"bedrock-admin\",\n",
|
||||
" model_id=\"amazon.titan-text-express-v1\",\n",
|
||||
" streaming=True,\n",
|
||||
" callbacks=[StreamingStdOutCallbackHandler()],\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"conversation = ConversationChain(\n",
|
||||
" llm=llm, verbose=True, memory=ConversationBufferMemory()\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"conversation.predict(input=\"Hi there!\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -132,22 +71,17 @@
|
||||
" model_id=\"<Custom model ARN>\", # ARN like 'arn:aws:bedrock:...' obtained via provisioning the custom model\n",
|
||||
" model_kwargs={\"temperature\": 1},\n",
|
||||
" streaming=True,\n",
|
||||
" callbacks=[StreamingStdOutCallbackHandler()],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"conversation = ConversationChain(\n",
|
||||
" llm=custom_llm, verbose=True, memory=ConversationBufferMemory()\n",
|
||||
")\n",
|
||||
"conversation.predict(input=\"What is the recipe of mayonnaise?\")"
|
||||
"custom_llm.invoke(input=\"What is the recipe of mayonnaise?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Guardrails for Amazon Bedrock example \n",
|
||||
"## Guardrails for Amazon Bedrock\n",
|
||||
"\n",
|
||||
"## Guardrails for Amazon Bedrock (Preview) \n",
|
||||
"[Guardrails for Amazon Bedrock](https://aws.amazon.com/bedrock/guardrails/) evaluates user inputs and model responses based on use case specific policies, and provides an additional layer of safeguards regardless of the underlying model. Guardrails can be applied across models, including Anthropic Claude, Meta Llama 2, Cohere Command, AI21 Labs Jurassic, and Amazon Titan Text, as well as fine-tuned models.\n",
|
||||
"**Note**: Guardrails for Amazon Bedrock is currently in preview and not generally available. Reach out through your usual AWS Support contacts if you’d like access to this feature.\n",
|
||||
"In this section, we are going to set up a Bedrock language model with specific guardrails that include tracing capabilities. "
|
||||
|
||||
@@ -14,15 +14,15 @@
|
||||
"Oracle Cloud Infrastructure (OCI) Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases, and which is available through a single API.\n",
|
||||
"Using the OCI Generative AI service you can access ready-to-use pretrained models, or create and host your own fine-tuned custom models based on your own data on dedicated AI clusters. Detailed documentation of the service and API is available __[here](https://docs.oracle.com/en-us/iaas/Content/generative-ai/home.htm)__ and __[here](https://docs.oracle.com/en-us/iaas/api/#/en/generative-ai/20231130/)__.\n",
|
||||
"\n",
|
||||
"This notebook explains how to use OCI's Genrative AI models with LangChain."
|
||||
"This notebook explains how to use OCI's Generative AI complete models with LangChain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Prerequisite\n",
|
||||
"We will need to install the oci sdk"
|
||||
"## Setup\n",
|
||||
"Ensure that the oci sdk and the langchain-community package are installed"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -31,31 +31,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install -U oci"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### OCI Generative AI API endpoint \n",
|
||||
"https://inference.generativeai.us-chicago-1.oci.oraclecloud.com"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Authentication\n",
|
||||
"The authentication methods supported for this langchain integration are:\n",
|
||||
"\n",
|
||||
"1. API Key\n",
|
||||
"2. Session token\n",
|
||||
"3. Instance principal\n",
|
||||
"4. Resource principal \n",
|
||||
"\n",
|
||||
"These follows the standard SDK authentication methods detailed __[here](https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdk_authentication_methods.htm)__.\n",
|
||||
" "
|
||||
"!pip install -U oci langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -71,13 +47,13 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.llms import OCIGenAI\n",
|
||||
"from langchain_community.llms.oci_generative_ai import OCIGenAI\n",
|
||||
"\n",
|
||||
"# use default authN method API-key\n",
|
||||
"llm = OCIGenAI(\n",
|
||||
" model_id=\"MY_MODEL\",\n",
|
||||
" model_id=\"cohere.command\",\n",
|
||||
" service_endpoint=\"https://inference.generativeai.us-chicago-1.oci.oraclecloud.com\",\n",
|
||||
" compartment_id=\"MY_OCID\",\n",
|
||||
" model_kwargs={\"temperature\": 0, \"max_tokens\": 500},\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"response = llm.invoke(\"Tell me one fact about earth\", temperature=0.7)\n",
|
||||
@@ -85,30 +61,10 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain_core.prompts import PromptTemplate\n",
|
||||
"\n",
|
||||
"# Use Session Token to authN\n",
|
||||
"llm = OCIGenAI(\n",
|
||||
" model_id=\"MY_MODEL\",\n",
|
||||
" service_endpoint=\"https://inference.generativeai.us-chicago-1.oci.oraclecloud.com\",\n",
|
||||
" compartment_id=\"MY_OCID\",\n",
|
||||
" auth_type=\"SECURITY_TOKEN\",\n",
|
||||
" auth_profile=\"MY_PROFILE\", # replace with your profile name\n",
|
||||
" model_kwargs={\"temperature\": 0.7, \"top_p\": 0.75, \"max_tokens\": 200},\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"prompt = PromptTemplate(input_variables=[\"query\"], template=\"{query}\")\n",
|
||||
"\n",
|
||||
"llm_chain = LLMChain(llm=llm, prompt=prompt)\n",
|
||||
"\n",
|
||||
"response = llm_chain.invoke(\"what is the capital of france?\")\n",
|
||||
"print(response)"
|
||||
"#### Chaining with prompt templates"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -117,49 +73,95 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.embeddings import OCIGenAIEmbeddings\n",
|
||||
"from langchain_community.vectorstores import FAISS\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"\n",
|
||||
"embeddings = OCIGenAIEmbeddings(\n",
|
||||
" model_id=\"MY_EMBEDDING_MODEL\",\n",
|
||||
" service_endpoint=\"https://inference.generativeai.us-chicago-1.oci.oraclecloud.com\",\n",
|
||||
" compartment_id=\"MY_OCID\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"vectorstore = FAISS.from_texts(\n",
|
||||
" [\n",
|
||||
" \"Larry Ellison co-founded Oracle Corporation in 1977 with Bob Miner and Ed Oates.\",\n",
|
||||
" \"Oracle Corporation is an American multinational computer technology company headquartered in Austin, Texas, United States.\",\n",
|
||||
" ],\n",
|
||||
" embedding=embeddings,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"retriever = vectorstore.as_retriever()\n",
|
||||
"\n",
|
||||
"template = \"\"\"Answer the question based only on the following context:\n",
|
||||
"{context}\n",
|
||||
" \n",
|
||||
"Question: {question}\n",
|
||||
"\"\"\"\n",
|
||||
"prompt = PromptTemplate.from_template(template)\n",
|
||||
"from langchain_core.prompts import PromptTemplate\n",
|
||||
"\n",
|
||||
"llm = OCIGenAI(\n",
|
||||
" model_id=\"MY_MODEL\",\n",
|
||||
" model_id=\"cohere.command\",\n",
|
||||
" service_endpoint=\"https://inference.generativeai.us-chicago-1.oci.oraclecloud.com\",\n",
|
||||
" compartment_id=\"MY_OCID\",\n",
|
||||
" model_kwargs={\"temperature\": 0, \"max_tokens\": 500},\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = (\n",
|
||||
" {\"context\": retriever, \"question\": RunnablePassthrough()}\n",
|
||||
" | prompt\n",
|
||||
" | llm\n",
|
||||
" | StrOutputParser()\n",
|
||||
"prompt = PromptTemplate(input_variables=[\"query\"], template=\"{query}\")\n",
|
||||
"llm_chain = prompt | llm\n",
|
||||
"\n",
|
||||
"response = llm_chain.invoke(\"what is the capital of france?\")\n",
|
||||
"print(response)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Streaming"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OCIGenAI(\n",
|
||||
" model_id=\"cohere.command\",\n",
|
||||
" service_endpoint=\"https://inference.generativeai.us-chicago-1.oci.oraclecloud.com\",\n",
|
||||
" compartment_id=\"MY_OCID\",\n",
|
||||
" model_kwargs={\"temperature\": 0, \"max_tokens\": 500},\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(chain.invoke(\"when was oracle founded?\"))\n",
|
||||
"print(chain.invoke(\"where is oracle headquartered?\"))"
|
||||
"for chunk in llm.stream(\"Write me a song about sparkling water.\"):\n",
|
||||
" print(chunk, end=\"\", flush=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Authentication\n",
|
||||
"The authentication methods supported for LlamaIndex are equivalent to those used with other OCI services and follow the __[standard SDK authentication](https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdk_authentication_methods.htm)__ methods, specifically API Key, session token, instance principal, and resource principal.\n",
|
||||
"\n",
|
||||
"API key is the default authentication method used in the examples above. The following example demonstrates how to use a different authentication method (session token)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OCIGenAI(\n",
|
||||
" model_id=\"cohere.command\",\n",
|
||||
" service_endpoint=\"https://inference.generativeai.us-chicago-1.oci.oraclecloud.com\",\n",
|
||||
" compartment_id=\"MY_OCID\",\n",
|
||||
" auth_type=\"SECURITY_TOKEN\",\n",
|
||||
" auth_profile=\"MY_PROFILE\", # replace with your profile name\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Dedicated AI Cluster\n",
|
||||
"To access models hosted in a dedicated AI cluster __[create an endpoint](https://docs.oracle.com/en-us/iaas/api/#/en/generative-ai-inference/20231130/)__ whose assigned OCID (currently prefixed by ‘ocid1.generativeaiendpoint.oc1.us-chicago-1’) is used as your model ID.\n",
|
||||
"\n",
|
||||
"When accessing models hosted in a dedicated AI cluster you will need to initialize the OCIGenAI interface with two extra required params (\"provider\" and \"context_size\")."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OCIGenAI(\n",
|
||||
" model_id=\"ocid1.generativeaiendpoint.oc1.us-chicago-1....\",\n",
|
||||
" service_endpoint=\"https://inference.generativeai.us-chicago-1.oci.oraclecloud.com\",\n",
|
||||
" compartment_id=\"DEDICATED_COMPARTMENT_OCID\",\n",
|
||||
" auth_profile=\"MY_PROFILE\", # replace with your profile name,\n",
|
||||
" provider=\"MODEL_PROVIDER\", # e.g., \"cohere\" or \"meta\"\n",
|
||||
" context_size=\"MODEL_CONTEXT_SIZE\", # e.g., 128000\n",
|
||||
")"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
24
docs/docs/integrations/providers/ascend.mdx
Normal file
24
docs/docs/integrations/providers/ascend.mdx
Normal file
@@ -0,0 +1,24 @@
|
||||
# Ascend

>[Ascend](https://www.hiascend.com/) is a Neural Processing Unit (NPU) provided by Huawei.

This page covers how to use the Ascend NPU with LangChain.

### Installation

Install torch-npu using:

```bash
pip install torch-npu
```

Please follow the installation instructions as specified below:
* Install CANN as shown [here](https://www.hiascend.com/document/detail/zh/canncommercial/700/quickstart/quickstart/quickstart_18_0002.html).

### Embedding Models

See a [usage example](/docs/integrations/text_embedding/ascend).

```python
from langchain_community.embeddings import AscendEmbeddings
```
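A minimal usage sketch under the standard `Embeddings` interface; the constructor argument shown (`model_path`) is an assumption, so defer to the linked usage example for the exact signature:

```python
from langchain_community.embeddings import AscendEmbeddings

# model_path is a placeholder for a locally downloaded embedding model
embeddings = AscendEmbeddings(model_path="/path/to/local/embedding/model")
vector = embeddings.embed_query("hello")  # embed_query is part of the standard Embeddings API
print(len(vector))
```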
|
||||
@@ -1,87 +1,111 @@
|
||||
Databricks
|
||||
==========
|
||||
|
||||
The [Databricks](https://www.databricks.com/) Lakehouse Platform unifies data, analytics, and AI on one platform.
|
||||
> [Databricks](https://www.databricks.com/) Intelligence Platform is the world's first data intelligence platform powered by generative AI. Infuse AI into every facet of your business.
|
||||
|
||||
Databricks embraces the LangChain ecosystem in various ways:
|
||||
|
||||
1. Databricks connector for the SQLDatabase Chain: SQLDatabase.from_databricks() provides an easy way to query your data on Databricks through LangChain
|
||||
2. Databricks MLflow integrates with LangChain: Tracking and serving LangChain applications with fewer steps
|
||||
3. Databricks as an LLM provider: Deploy your fine-tuned LLMs on Databricks via serving endpoints or cluster driver proxy apps, and query it as langchain.llms.Databricks
|
||||
4. Databricks Dolly: Databricks open-sourced Dolly which allows for commercial use, and can be accessed through the Hugging Face Hub
|
||||
1. 🚀 **Model Serving** - Access state-of-the-art LLMs, such as DBRX, Llama3, Mixtral, or your fine-tuned models on [Databricks Model Serving](https://www.databricks.com/product/model-serving), via a highly available and low-latency inference endpoint. LangChain provides LLM (`Databricks`), Chat Model (`ChatDatabricks`), and Embeddings (`DatabricksEmbeddings`) implementations, streamlining the integration of your models hosted on Databricks Model Serving with your LangChain applications.
|
||||
2. 📃 **Vector Search** - [Databricks Vector Search](https://www.databricks.com/product/machine-learning/vector-search) is a serverless vector database seamlessly integrated within the Databricks Platform. Using `DatabricksVectorSearch`, you can incorporate the highly scalable and reliable similarity search engine into your LangChain applications.
|
||||
3. 📊 **MLflow** - [MLflow](https://mlflow.org/) is an open-source platform to manage the full ML lifecycle, including experiment management, evaluation, tracing, deployment, and more. [MLflow's LangChain Integration](/docs/integrations/providers/mlflow_tracking) streamlines the process of developing and operating modern compound ML systems.
|
||||
4. 🌐 **SQL Database** - [Databricks SQL](https://www.databricks.com/product/databricks-sql) is integrated with `SQLDatabase` in LangChain, allowing you to access the auto-optimizing, exceptionally performant data warehouse.
|
||||
5. 💡 **Open Models** - Databricks open sources models, such as [DBRX](https://www.databricks.com/blog/introducing-dbrx-new-state-art-open-llm), which are available through the [Hugging Face Hub](https://huggingface.co/databricks/dbrx-instruct). These models can be directly utilized with LangChain, leveraging its integration with the `transformers` library.
|
||||
|
||||
Databricks connector for the SQLDatabase Chain
|
||||
----------------------------------------------
|
||||
You can connect to [Databricks runtimes](https://docs.databricks.com/runtime/index.html) and [Databricks SQL](https://www.databricks.com/product/databricks-sql) using the SQLDatabase wrapper of LangChain.
|
||||
Chat Model
|
||||
----------
|
||||
|
||||
`ChatDatabricks` is a Chat Model class to access chat endpoints hosted on Databricks, including state-of-the-art models such as Llama3, Mixtral, and DBRX, as well as your own fine-tuned models.
|
||||
|
||||
Databricks MLflow integrates with LangChain
|
||||
-------------------------------------------
|
||||
```
|
||||
from langchain_community.chat_models.databricks import ChatDatabricks
|
||||
|
||||
MLflow is an open-source platform to manage the ML lifecycle, including experimentation, reproducibility, deployment, and a central model registry. See the notebook [MLflow Callback Handler](/docs/integrations/providers/mlflow_tracking) for details about MLflow's integration with LangChain.
|
||||
|
||||
Databricks provides a fully managed and hosted version of MLflow integrated with enterprise security features, high availability, and other Databricks workspace features such as experiment and run management and notebook revision capture. MLflow on Databricks offers an integrated experience for tracking and securing machine learning model training runs and running machine learning projects. See [MLflow guide](https://docs.databricks.com/mlflow/index.html) for more details.
|
||||
|
||||
Databricks MLflow makes it more convenient to develop LangChain applications on Databricks. For MLflow tracking, you don't need to set the tracking uri. For MLflow Model Serving, you can save LangChain Chains in the MLflow langchain flavor, and then register and serve the Chain with a few clicks on Databricks, with credentials securely managed by MLflow Model Serving.
|
||||
|
||||
Databricks External Models
|
||||
--------------------------
|
||||
|
||||
[Databricks External Models](https://docs.databricks.com/generative-ai/external-models/index.html) is a service that is designed to streamline the usage and management of various large language model (LLM) providers, such as OpenAI and Anthropic, within an organization. It offers a high-level interface that simplifies the interaction with these services by providing a unified endpoint to handle specific LLM related requests. The following example creates an endpoint that serves OpenAI's GPT-4 model and generates a chat response from it:
|
||||
|
||||
```python
|
||||
from langchain_community.chat_models import ChatDatabricks
|
||||
from langchain_core.messages import HumanMessage
|
||||
from mlflow.deployments import get_deploy_client
|
||||
|
||||
|
||||
client = get_deploy_client("databricks")
|
||||
name = f"chat"
|
||||
client.create_endpoint(
|
||||
name=name,
|
||||
config={
|
||||
"served_entities": [
|
||||
{
|
||||
"name": "test",
|
||||
"external_model": {
|
||||
"name": "gpt-4",
|
||||
"provider": "openai",
|
||||
"task": "llm/v1/chat",
|
||||
"openai_config": {
|
||||
"openai_api_key": "{{secrets/<scope>/<key>}}",
|
||||
},
|
||||
},
|
||||
}
|
||||
],
|
||||
},
|
||||
)
|
||||
chat = ChatDatabricks(endpoint=name, temperature=0.1)
|
||||
print(chat([HumanMessage(content="hello")]))
|
||||
# -> content='Hello! How can I assist you today?'
|
||||
chat_model = ChatDatabricks(endpoint="databricks-meta-llama-3-70b-instruct")
|
||||
```
|
||||
|
||||
Databricks Foundation Model APIs
|
||||
--------------------------------
|
||||
See the [usage example](/docs/integrations/chat/databricks) for more guidance on how to use it within your LangChain application.
|
||||
|
||||
[Databricks Foundation Model APIs](https://docs.databricks.com/machine-learning/foundation-models/index.html) allow you to access and query state-of-the-art open source models from dedicated serving endpoints. With Foundation Model APIs, developers can quickly and easily build applications that leverage a high-quality generative AI model without maintaining their own model deployment. The following example uses the `databricks-bge-large-en` endpoint to generate embeddings from text:
|
||||
LLM
|
||||
---
|
||||
|
||||
`Databricks` is an LLM class to access completion endpoints hosted on Databricks.

```
from langchain_community.llms.databricks import Databricks

llm = Databricks(endpoint="your-completion-endpoint")
```
|
||||
|
||||
See the [usage example](/docs/integrations/llms/databricks) for more guidance on how to use it within your LangChain application.
|
||||
|
||||
|
||||
Embeddings
|
||||
----------
|
||||
|
||||
`DatabricksEmbeddings` is an Embeddings class to access text-embedding endpoints hosted on Databricks, including state-of-the-art models such as BGE, as well as your own fine-tuned models.
|
||||
|
||||
```
|
||||
from langchain_community.embeddings import DatabricksEmbeddings
|
||||
|
||||
|
||||
embeddings = DatabricksEmbeddings(endpoint="databricks-bge-large-en")
|
||||
print(embeddings.embed_query("hello")[:3])
|
||||
# -> [0.051055908203125, 0.007221221923828125, 0.003879547119140625, ...]
|
||||
```
|
||||
|
||||
Databricks as an LLM provider
|
||||
-----------------------------
|
||||
|
||||
The notebook [Wrap Databricks endpoints as LLMs](/docs/integrations/llms/databricks#wrapping-a-serving-endpoint-custom-model) demonstrates how to serve a custom model that has been registered by MLflow as a Databricks endpoint.
|
||||
It supports two types of endpoints: the serving endpoint, which is recommended for both production and development, and the cluster driver proxy app, which is recommended for interactive development.
|
||||
See the [usage example](/docs/integrations/text_embedding/databricks) for more guidance on how to use it within your LangChain application.
|
||||
|
||||
|
||||
Databricks Vector Search
|
||||
------------------------
|
||||
Vector Search
|
||||
-------------
|
||||
|
||||
Databricks Vector Search is a serverless similarity search engine that allows you to store a vector representation of your data, including metadata, in a vector database. With Vector Search, you can create auto-updating vector search indexes from Delta tables managed by Unity Catalog and query them with a simple API to return the most similar vectors. See the notebook [Databricks Vector Search](/docs/integrations/vectorstores/databricks_vector_search) for instructions to use it with LangChain.
|
||||
Databricks Vector Search is a serverless similarity search engine that allows you to store a vector representation of your data, including metadata, in a vector database. With Vector Search, you can create auto-updating vector search indexes from [Delta](https://docs.databricks.com/en/introduction/delta-comparison.html) tables managed by [Unity Catalog](https://www.databricks.com/product/unity-catalog) and query them with a simple API to return the most similar vectors.
|
||||
|
||||
```
|
||||
from langchain_community.vectorstores import DatabricksVectorSearch
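# `index` is assumed to be a Vector Search index handle created beforehand
# (e.g. via the databricks-vectorsearch SDK); `embeddings` is any LangChain Embeddings instance.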
|
||||
|
||||
dvs = DatabricksVectorSearch(
|
||||
index, text_column="text", embedding=embeddings, columns=["source"]
|
||||
)
|
||||
docs = dvs.similarity_search("What is vector search?")
|
||||
```
|
||||
|
||||
See the [usage example](/docs/integrations/vectorstores/databricks_vector_search) for how to set up vector indices and integrate them with LangChain.
|
||||
|
||||
|
||||
MLflow Integration
|
||||
------------------
|
||||
|
||||
In the context of LangChain integration, MLflow provides the following capabilities:
|
||||
|
||||
- **Experiment Tracking**: Tracks and stores models, artifacts, and traces from your LangChain experiments.
|
||||
- **Dependency Management**: Automatically records dependency libraries, ensuring consistency among development, staging, and production environments.
|
||||
- **Model Evaluation**: Offers native capabilities for evaluating LangChain applications.
|
||||
- **Tracing**: Visually traces data flows through your LangChain application.
|
||||
|
||||
See [MLflow LangChain Integration](/docs/integrations/providers/mlflow_tracking) to learn about the full capabilities of using MLflow with LangChain through extensive code examples and guides.
|
||||
|
||||
SQLDatabase
|
||||
-----------
|
||||
You can connect to Databricks SQL using the SQLDatabase wrapper of LangChain.
|
||||
```
|
||||
from langchain.sql_database import SQLDatabase
|
||||
|
||||
db = SQLDatabase.from_databricks(catalog="samples", schema="nyctaxi")
|
||||
```
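With the wrapper in place, plain SQL runs against the warehouse; a minimal, illustrative query against the sample schema:

```
# `trips` is a table in the Databricks samples.nyctaxi schema
print(db.run("SELECT * FROM trips LIMIT 5"))
```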
|
||||
|
||||
See [Databricks SQL Agent](https://docs.databricks.com/en/large-language-models/langchain.html#databricks-sql-agent) for how to connect Databricks SQL with your LangChain Agent as a powerful querying tool.
|
||||
|
||||
Open Models
|
||||
-----------
|
||||
|
||||
To directly integrate Databricks's open models hosted on HuggingFace, you can use the [HuggingFace Integration](/docs/integrations/platforms/huggingface) of LangChain.
|
||||
|
||||
```
|
||||
from langchain_huggingface import HuggingFaceEndpoint
|
||||
|
||||
llm = HuggingFaceEndpoint(
|
||||
repo_id="databricks/dbrx-instruct",
|
||||
task="text-generation",
|
||||
max_new_tokens=512,
|
||||
do_sample=False,
|
||||
repetition_penalty=1.03,
|
||||
)
|
||||
llm.invoke("What is DBRX model?")
|
||||
```
|
||||
|
||||
@@ -2,27 +2,29 @@
|
||||
|
||||
The `LangChain` integrations related to [Oracle Cloud Infrastructure](https://www.oracle.com/artificial-intelligence/).
|
||||
|
||||
## LLMs
|
||||
|
||||
### OCI Generative AI
|
||||
## OCI Generative AI
|
||||
> Oracle Cloud Infrastructure (OCI) [Generative AI](https://docs.oracle.com/en-us/iaas/Content/generative-ai/home.htm) is a fully managed service that provides a set of state-of-the-art,
|
||||
> customizable large language models (LLMs) that cover a wide range of use cases, and which are available through a single API.
|
||||
> Using the OCI Generative AI service you can access ready-to-use pretrained models, or create and host your own fine-tuned
|
||||
> custom models based on your own data on dedicated AI clusters.
|
||||
|
||||
To use, you should have the latest `oci` python SDK installed.
|
||||
To use, you should have the latest `oci` python SDK and the langchain_community package installed.
|
||||
|
||||
```bash
|
||||
pip install -U oci
|
||||
pip install -U oci langchain-community
|
||||
```
|
||||
|
||||
See [usage examples](/docs/integrations/llms/oci_generative_ai).
|
||||
See [chat](/docs/integrations/chat/oci_generative_ai), [complete](/docs/integrations/llms/oci_generative_ai), and [embedding](/docs/integrations/text_embedding/oci_generative_ai) usage examples.
|
||||
|
||||
```python
|
||||
from langchain_community.chat_models import ChatOCIGenAI
|
||||
|
||||
from langchain_community.llms import OCIGenAI
|
||||
|
||||
from langchain_community.embeddings import OCIGenAIEmbeddings
|
||||
```
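A minimal instantiation sketch, mirroring the parameters used in the linked notebooks (the compartment OCID is a placeholder):

```python
chat = ChatOCIGenAI(
    model_id="cohere.command-r-16k",
    service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
    compartment_id="MY_OCID",
)
```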
|
||||
|
||||
### OCI Data Science Model Deployment Endpoint
|
||||
## OCI Data Science Model Deployment Endpoint
|
||||
|
||||
> [OCI Data Science](https://docs.oracle.com/en-us/iaas/data-science/using/home.htm) is a
|
||||
> fully managed and serverless platform for data science teams. Using the OCI Data Science
|
||||
@@ -47,12 +49,3 @@ from langchain_community.llms import OCIModelDeploymentVLLM
|
||||
from langchain_community.llms import OCIModelDeploymentTGI
|
||||
```
|
||||
|
||||
## Text Embedding Models
|
||||
|
||||
### OCI Generative AI
|
||||
|
||||
See [usage examples](/docs/integrations/text_embedding/oci_generative_ai).
|
||||
|
||||
```python
|
||||
from langchain_community.embeddings import OCIGenAIEmbeddings
|
||||
```
|
||||
21
docs/docs/integrations/providers/pebblo/index.md
Normal file
21
docs/docs/integrations/providers/pebblo/index.md
Normal file
@@ -0,0 +1,21 @@
|
||||
# Pebblo

[Pebblo](https://www.daxa.ai/pebblo) enables developers to safely load and retrieve data to promote their Gen AI app to deployment without
worrying about the organization’s compliance and security requirements. The Pebblo SafeLoader identifies semantic topics and entities found in the
loaded data and the Pebblo SafeRetriever enforces identity and semantic controls on the retrieved context. The results are
summarized in the UI or in a PDF report.

## Pebblo Overview

`Pebblo` provides a safe way to load and retrieve data for Gen AI applications.
It includes:
1. **Identity-aware Safe Loader** that loads data and identifies semantic topics and entities.
2. **SafeRetrieval** that enforces identity and semantic controls on the retrieved context.
3. **User Data Report** that summarizes the data loaded and retrieved.

## Example Notebooks

For more detailed examples of using Pebblo, see the following notebooks:
* [PebbloSafeLoader](/docs/integrations/document_loaders/pebblo) shows how to use the Pebblo loader to safely load data.
* [PebbloRetrievalQA](/docs/integrations/providers/pebblo/pebblo_retrieval_qa) shows how to use the Pebblo retrieval QA chain to safely retrieve data.
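A minimal sketch of the Safe Loader pattern described above; any standard LangChain loader can be wrapped, and the `name`, `owner`, and `description` values below are placeholders that identify the app in Pebblo reports:

```python
from langchain_community.document_loaders import CSVLoader, PebbloSafeLoader

loader = PebbloSafeLoader(
    CSVLoader("data.csv"),  # placeholder source; wrap any standard loader
    name="acme-corp-rag-app",  # placeholder app name
    owner="Joe Smith",  # placeholder owner
    description="Support productivity RAG application",  # placeholder description
)
documents = loader.load()
```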
|
||||
@@ -0,0 +1,584 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3ce451e9-f8f1-4f27-8c6b-4a93a406d504",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Identity-enabled RAG using PebbloRetrievalQA\n",
|
||||
"\n",
|
||||
"> PebbloRetrievalQA is a Retrieval chain with Identity & Semantic Enforcement for question-answering\n",
|
||||
"against a vector database.\n",
|
||||
"\n",
|
||||
"This notebook covers how to retrieve documents using Identity & Semantic Enforcement (Deny Topics/Entities).\n",
|
||||
"For more details on Pebblo and its SafeRetriever feature visit [Pebblo documentation](https://daxa-ai.github.io/pebblo/retrieval_chain/)\n",
|
||||
"\n",
|
||||
"### Steps:\n",
|
||||
"\n",
|
||||
"1. **Loading Documents:**\n",
|
||||
"We will load documents with authorization and semantic metadata into an in-memory Qdrant vectorstore. This vectorstore will be used as a retriever in PebbloRetrievalQA. \n",
|
||||
"\n",
|
||||
"> **Note:** It is recommended to use [PebbloSafeLoader](https://daxa-ai.github.io/pebblo/rag) as the counterpart for loading documents with authentication and semantic metadata on the ingestion side. `PebbloSafeLoader` guarantees the secure and efficient loading of documents while maintaining the integrity of the metadata.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"2. **Testing Enforcement Mechanisms**:\n",
|
||||
" We will test Identity and Semantic Enforcement separately. For each use case, we will define a specific \"ask\" function with the required contexts (*auth_context* and *semantic_context*) and then pose our questions.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4ee16b6b-5dac-4b5c-bb69-3ec87398a33c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"### Dependencies\n",
|
||||
"\n",
|
||||
"We'll use an OpenAI LLM, OpenAI embeddings and a Qdrant vector store in this walkthrough.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e68494fa-f387-4481-9a6c-58294865d7b7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain langchain_core langchain-community langchain-openai qdrant_client"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "61498d51-0c38-40e2-adcd-19dfdf4d37ef",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Identity-aware Data Ingestion\n",
|
||||
"\n",
|
||||
"Here we are using Qdrant as a vector database; however, you can use any of the supported vector databases.\n",
|
||||
"\n",
|
||||
"**PebbloRetrievalQA chain supports the following vector databases:**\n",
|
||||
"- Qdrant\n",
|
||||
"- Pinecone\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"**Load vector database with authorization and semantic information in metadata:**\n",
|
||||
"\n",
|
||||
"In this step, we capture the authorization and semantic information of the source document into the `authorized_identities`, `pebblo_semantic_topics`, and `pebblo_semantic_entities` fields within the metadata of the VectorDB entry for each chunk. \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"*NOTE: To use the PebbloRetrievalQA chain, you must always place authorization and semantic metadata in the specified fields. These fields must contain a list of strings.*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "ae4fcbc1-bdc3-40d2-b2df-8c82cad1f89c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Vectordb loaded.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.vectorstores.qdrant import Qdrant\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_openai.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain_openai.llms import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI()\n",
|
||||
"embeddings = OpenAIEmbeddings()\n",
|
||||
"collection_name = \"pebblo-identity-and-semantic-rag\"\n",
|
||||
"\n",
|
||||
"page_content = \"\"\"\n",
|
||||
"**ACME Corp Financial Report**\n",
|
||||
"\n",
|
||||
"**Overview:**\n",
|
||||
"ACME Corp, a leading player in the merger and acquisition industry, presents its financial report for the fiscal year ending December 31, 2020. \n",
|
||||
"Despite a challenging economic landscape, ACME Corp demonstrated robust performance and strategic growth.\n",
|
||||
"\n",
|
||||
"**Financial Highlights:**\n",
|
||||
"Revenue soared to $50 million, marking a 15% increase from the previous year, driven by successful deal closures and expansion into new markets. \n",
|
||||
"Net profit reached $12 million, showcasing a healthy margin of 24%.\n",
|
||||
"\n",
|
||||
"**Key Metrics:**\n",
|
||||
"Total assets surged to $80 million, reflecting a 20% growth, highlighting ACME Corp's strong financial position and asset base. \n",
|
||||
"Additionally, the company maintained a conservative debt-to-equity ratio of 0.5, ensuring sustainable financial stability.\n",
|
||||
"\n",
|
||||
"**Future Outlook:**\n",
|
||||
"ACME Corp remains optimistic about the future, with plans to capitalize on emerging opportunities in the global M&A landscape. \n",
|
||||
"The company is committed to delivering value to shareholders while maintaining ethical business practices.\n",
|
||||
"\n",
|
||||
"**Bank Account Details:**\n",
|
||||
"For inquiries or transactions, please refer to ACME Corp's US bank account:\n",
|
||||
"Account Number: 123456789012\n",
|
||||
"Bank Name: Fictitious Bank of America\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"documents = [\n",
|
||||
" Document(\n",
|
||||
" **{\n",
|
||||
" \"page_content\": page_content,\n",
|
||||
" \"metadata\": {\n",
|
||||
" \"pebblo_semantic_topics\": [\"financial-report\"],\n",
|
||||
" \"pebblo_semantic_entities\": [\"us-bank-account-number\"],\n",
|
||||
" \"authorized_identities\": [\"finance-team\", \"exec-leadership\"],\n",
|
||||
" \"page\": 0,\n",
|
||||
" \"source\": \"https://drive.google.com/file/d/xxxxxxxxxxxxx/view\",\n",
|
||||
" \"title\": \"ACME Corp Financial Report.pdf\",\n",
|
||||
" },\n",
|
||||
" }\n",
|
||||
" )\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"vectordb = Qdrant.from_documents(\n",
|
||||
" documents,\n",
|
||||
" embeddings,\n",
|
||||
" location=\":memory:\",\n",
|
||||
" collection_name=collection_name,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(\"Vectordb loaded.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f630bb8b-67ba-41f9-8715-76d006207e75",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Retrieval with Identity Enforcement\n",
|
||||
"\n",
|
||||
"PebbloRetrievalQA chain uses a SafeRetrieval to enforce that the snippets used for in-context are retrieved only from the documents authorized for the user. \n",
|
||||
"To achieve this, the Gen-AI application needs to provide an authorization context for this retrieval chain. \n",
|
||||
"This *auth_context* should be filled with the identity and authorization groups of the user accessing the Gen-AI app.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Here is the sample code for the `PebbloRetrievalQA` with `user_auth`(List of user authorizations, which may include their User ID and \n",
|
||||
" the groups they are part of) from the user accessing the RAG application, passed in `auth_context`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "e978bee6-3a8c-459f-ab82-d380d7499b36",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.chains import PebbloRetrievalQA\n",
|
||||
"from langchain_community.chains.pebblo_retrieval.models import AuthContext, ChainInput\n",
|
||||
"\n",
|
||||
"# Initialize PebbloRetrievalQA chain\n",
|
||||
"qa_chain = PebbloRetrievalQA.from_chain_type(\n",
|
||||
" llm=llm,\n",
|
||||
" retriever=vectordb.as_retriever(),\n",
|
||||
" app_name=\"pebblo-identity-rag\",\n",
|
||||
" description=\"Identity Enforcement app using PebbloRetrievalQA\",\n",
|
||||
" owner=\"ACME Corp\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def ask(question: str, auth_context: dict):\n",
|
||||
" \"\"\"\n",
|
||||
" Ask a question to the PebbloRetrievalQA chain\n",
|
||||
" \"\"\"\n",
|
||||
" auth_context_obj = AuthContext(**auth_context) if auth_context else None\n",
|
||||
" chain_input_obj = ChainInput(query=question, auth_context=auth_context_obj)\n",
|
||||
" return qa_chain.invoke(chain_input_obj.dict())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7a267e96-70cb-468f-b830-83b65e9b7f6f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 1. Questions by Authorized User\n",
|
||||
"\n",
|
||||
"We ingested data for authorized identities `[\"finance-team\", \"exec-leadership\"]`, so a user with the authorized identity/group `finance-team` should receive the correct answer."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "2688fc18-1eac-45a5-be55-aabbe6b25af5",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Question: Share the financial performance of ACME Corp for the year 2020\n",
|
||||
"\n",
|
||||
"Answer: \n",
|
||||
"Revenue: $50 million (15% increase from previous year)\n",
|
||||
"Net profit: $12 million (24% margin)\n",
|
||||
"Total assets: $80 million (20% growth)\n",
|
||||
"Debt-to-equity ratio: 0.5\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"auth = {\n",
|
||||
" \"user_id\": \"finance-user@acme.org\",\n",
|
||||
" \"user_auth\": [\n",
|
||||
" \"finance-team\",\n",
|
||||
" ],\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"question = \"Share the financial performance of ACME Corp for the year 2020\"\n",
|
||||
"resp = ask(question, auth)\n",
|
||||
"print(f\"Question: {question}\\n\\nAnswer: {resp['result']}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b4db6566-6562-4a49-b19c-6d99299b374e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 2. Questions by Unauthorized User\n",
|
||||
"\n",
|
||||
"Since the user's authorized identity/group `eng-support` is not included in the authorized identities `[\"finance-team\", \"exec-leadership\"]`, we should not receive an answer."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "2d736ce3-6e05-48d3-a5e1-fb4e7cccc1ee",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Question: Share the financial performance of ACME Corp for the year 2020\n",
|
||||
"\n",
|
||||
"Answer: I don't know.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"auth = {\n",
|
||||
" \"user_id\": \"eng-user@acme.org\",\n",
|
||||
" \"user_auth\": [\n",
|
||||
" \"eng-support\",\n",
|
||||
" ],\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"question = \"Share the financial performance of ACME Corp for the year 2020\"\n",
|
||||
"resp = ask(question, auth)\n",
|
||||
"print(f\"Question: {question}\\n\\nAnswer: {resp['result']}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "33a8afe1-3071-4118-9714-a17cba809ee4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 3. Using PromptTemplate to provide additional instructions\n",
|
||||
"You can use PromptTemplate to provide additional instructions to the LLM for generating a custom response."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "59c055ba-fdd1-48c6-9bc9-2793eb47438d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.prompts import PromptTemplate\n",
|
||||
"\n",
|
||||
"prompt_template = PromptTemplate.from_template(\n",
|
||||
" \"\"\"\n",
|
||||
"Answer the question using the provided context. \n",
|
||||
"If no context is provided, just say \"I'm sorry, but that information is unavailable, or Access to it is restricted.\".\n",
|
||||
"\n",
|
||||
"Question: {question}\n",
|
||||
"\"\"\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"question = \"Share the financial performance of ACME Corp for the year 2020\"\n",
|
||||
"prompt = prompt_template.format(question=question)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c4d27c00-73d9-4ce8-bc70-29535deaf0e2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### 3.1 Questions by Authorized User"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"id": "e68a13a4-b735-421d-9655-2a9a087ba9e5",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Question: Share the financial performance of ACME Corp for the year 2020\n",
|
||||
"\n",
|
||||
"Answer: \n",
|
||||
"Revenue soared to $50 million, marking a 15% increase from the previous year, and net profit reached $12 million, showcasing a healthy margin of 24%. Total assets also grew by 20% to $80 million, and the company maintained a conservative debt-to-equity ratio of 0.5.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"auth = {\n",
|
||||
" \"user_id\": \"finance-user@acme.org\",\n",
|
||||
" \"user_auth\": [\n",
|
||||
" \"finance-team\",\n",
|
||||
" ],\n",
|
||||
"}\n",
|
||||
"resp = ask(prompt, auth)\n",
|
||||
"print(f\"Question: {question}\\n\\nAnswer: {resp['result']}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7b97a9ca-bdc6-400a-923d-65a8536658be",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### 3.2 Questions by Unauthorized Users"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"id": "438e48c6-96a2-4d5e-81db-47f8c8f37739",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Question: Share the financial performance of ACME Corp for the year 2020\n",
|
||||
"\n",
|
||||
"Answer: \n",
|
||||
"I'm sorry, but that information is unavailable, or Access to it is restricted.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"auth = {\n",
|
||||
" \"user_id\": \"eng-user@acme.org\",\n",
|
||||
" \"user_auth\": [\n",
|
||||
" \"eng-support\",\n",
|
||||
" ],\n",
|
||||
"}\n",
|
||||
"resp = ask(prompt, auth)\n",
|
||||
"print(f\"Question: {question}\\n\\nAnswer: {resp['result']}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4306cab3-d070-405f-a23b-5c6011a61c50",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Retrieval with Semantic Enforcement"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1c3757cf-832f-483e-aafe-cb09b5130ec0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The PebbloRetrievalQA chain uses SafeRetrieval to ensure that the snippets used in context are retrieved only from documents that comply with the\n",
|
||||
"provided semantic context.\n",
|
||||
"To achieve this, the Gen-AI application must provide a semantic context for this retrieval chain.\n",
|
||||
"This `semantic_context` should include the topics and entities that should be denied for the user accessing the Gen-AI app.\n",
|
||||
"\n",
|
||||
"Below is a sample code for PebbloRetrievalQA with `topics_to_deny` and `entities_to_deny`. These are passed in `semantic_context` to the chain input."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 28,
|
||||
"id": "daf37bf7-9a16-4102-8893-5b698cae1b07",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import List, Optional\n",
|
||||
"\n",
|
||||
"from langchain_community.chains import PebbloRetrievalQA\n",
|
||||
"from langchain_community.chains.pebblo_retrieval.models import (\n",
|
||||
" ChainInput,\n",
|
||||
" SemanticContext,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Initialize PebbloRetrievalQA chain\n",
|
||||
"qa_chain = PebbloRetrievalQA.from_chain_type(\n",
|
||||
" llm=llm,\n",
|
||||
" retriever=vectordb.as_retriever(),\n",
|
||||
" app_name=\"pebblo-semantic-rag\",\n",
|
||||
" description=\"Semantic Enforcement app using PebbloRetrievalQA\",\n",
|
||||
" owner=\"ACME Corp\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def ask(\n",
|
||||
" question: str,\n",
|
||||
" topics_to_deny: Optional[List[str]] = None,\n",
|
||||
" entities_to_deny: Optional[List[str]] = None,\n",
|
||||
"):\n",
|
||||
" \"\"\"\n",
|
||||
" Ask a question to the PebbloRetrievalQA chain\n",
|
||||
" \"\"\"\n",
|
||||
" semantic_context = dict()\n",
|
||||
" if topics_to_deny:\n",
|
||||
" semantic_context[\"pebblo_semantic_topics\"] = {\"deny\": topics_to_deny}\n",
|
||||
" if entities_to_deny:\n",
|
||||
" semantic_context[\"pebblo_semantic_entities\"] = {\"deny\": entities_to_deny}\n",
|
||||
"\n",
|
||||
" semantic_context_obj = (\n",
|
||||
" SemanticContext(**semantic_context) if semantic_context else None\n",
|
||||
" )\n",
|
||||
" chain_input_obj = ChainInput(query=question, semantic_context=semantic_context_obj)\n",
|
||||
" return qa_chain.invoke(chain_input_obj.dict())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9718819b-f5cd-4212-9947-d18cd507c8b7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 1. Without semantic enforcement\n",
|
||||
"\n",
|
||||
"Since no semantic enforcement is applied, the system should return the answer without excluding any context due to the semantic labels associated with the context.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 29,
|
||||
"id": "69158be1-f223-4d14-b61f-f4afdf5af526",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Topics to deny: []\n",
|
||||
"Entities to deny: []\n",
|
||||
"Question: Share the financial performance of ACME Corp for the year 2020\n",
|
||||
"Answer: \n",
|
||||
"Revenue for ACME Corp increased by 15% to $50 million in 2020, with a net profit of $12 million and a strong asset base of $80 million. The company also maintained a conservative debt-to-equity ratio of 0.5.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"topic_to_deny = []\n",
|
||||
"entities_to_deny = []\n",
|
||||
"question = \"Share the financial performance of ACME Corp for the year 2020\"\n",
|
||||
"resp = ask(question, topics_to_deny=topic_to_deny, entities_to_deny=entities_to_deny)\n",
|
||||
"print(\n",
|
||||
" f\"Topics to deny: {topic_to_deny}\\nEntities to deny: {entities_to_deny}\\n\"\n",
|
||||
" f\"Question: {question}\\nAnswer: {resp['result']}\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c8789c58-0d64-404e-bc09-92f6952022ac",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 2. Deny financial-report topic\n",
|
||||
"\n",
|
||||
"Data has been ingested with the topics: `[\"financial-report\"]`.\n",
|
||||
"Therefore, an app that denies the `financial-report` topic should not receive an answer."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 30,
|
||||
"id": "9b17b2fc-eefb-4229-a41e-2f943d2eb48e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Topics to deny: ['financial-report']\n",
|
||||
"Entities to deny: []\n",
|
||||
"Question: Share the financial performance of ACME Corp for the year 2020\n",
|
||||
"Answer: \n",
|
||||
"\n",
|
||||
"Unfortunately, I do not have access to the financial performance of ACME Corp for the year 2020.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"topic_to_deny = [\"financial-report\"]\n",
|
||||
"entities_to_deny = []\n",
|
||||
"question = \"Share the financial performance of ACME Corp for the year 2020\"\n",
|
||||
"resp = ask(question, topics_to_deny=topic_to_deny, entities_to_deny=entities_to_deny)\n",
|
||||
"print(\n",
|
||||
" f\"Topics to deny: {topic_to_deny}\\nEntities to deny: {entities_to_deny}\\n\"\n",
|
||||
" f\"Question: {question}\\nAnswer: {resp['result']}\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "894f21b0-2913-4ef6-b5ed-cbca8f74214d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 3. Deny us-bank-account-number entity\n",
|
||||
"Since the entity `us-bank-account-number` is denied, the system should not return the answer."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 31,
|
||||
"id": "2b8abce3-7af3-437f-8999-2866a4b9beda",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Topics to deny: []\n",
|
||||
"Entities to deny: ['us-bank-account-number']\n",
|
||||
"Question: Share the financial performance of ACME Corp for the year 2020\n",
|
||||
"Answer: I don't have information about ACME Corp's financial performance for 2020.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"topic_to_deny = []\n",
|
||||
"entities_to_deny = [\"us-bank-account-number\"]\n",
|
||||
"question = \"Share the financial performance of ACME Corp for the year 2020\"\n",
|
||||
"resp = ask(question, topics_to_deny=topic_to_deny, entities_to_deny=entities_to_deny)\n",
|
||||
"print(\n",
|
||||
" f\"Topics to deny: {topic_to_deny}\\nEntities to deny: {entities_to_deny}\\n\"\n",
|
||||
" f\"Question: {question}\\nAnswer: {resp['result']}\"\n",
|
||||
")"
|
||||
]
|
||||
}
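,
{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
  "Identity and semantic enforcement can also be combined: `ChainInput` accepts both an `auth_context` and a `semantic_context`. Below is a minimal sketch reusing the objects defined above (it assumes the chain applies both contexts together):"
 ]
},
{
 "cell_type": "code",
 "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
  "# Combine identity and semantic enforcement in a single chain input\n",
  "auth_context_obj = AuthContext(\n",
  "    user_id=\"finance-user@acme.org\", user_auth=[\"finance-team\"]\n",
  ")\n",
  "semantic_context_obj = SemanticContext(\n",
  "    **{\"pebblo_semantic_topics\": {\"deny\": [\"financial-report\"]}}\n",
  ")\n",
  "chain_input_obj = ChainInput(\n",
  "    query=question,\n",
  "    auth_context=auth_context_obj,\n",
  "    semantic_context=semantic_context_obj,\n",
  ")\n",
  "resp = qa_chain.invoke(chain_input_obj.dict())"
 ]
}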
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.18"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
183
docs/docs/integrations/text_embedding/ascend.ipynb
Normal file
@@ -0,0 +1,183 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "a636f6f3-00d7-4248-8c36-3da51190e882",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[-0.04053403 -0.05560051 -0.04385472 ... 0.09371872 0.02846981\n",
|
||||
" -0.00576814]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.embeddings import AscendEmbeddings\n",
|
||||
"\n",
|
||||
"model = AscendEmbeddings(\n",
|
||||
" model_path=\"/root/.cache/modelscope/hub/yangjhchs/acge_text_embedding\",\n",
|
||||
" device_id=0,\n",
|
||||
" query_instruction=\"Represend this sentence for searching relevant passages: \",\n",
|
||||
")\n",
|
||||
"emb = model.embed_query(\"hellow\")\n",
|
||||
"print(emb)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "8d29ddaa-eef3-4a4e-93d8-0f1c13525fb4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"We strongly recommend passing in an `attention_mask` since your input_ids may be padded. See https://huggingface.co/docs/transformers/troubleshooting#incorrect-output-when-padding-tokens-arent-masked.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[[-0.00348254 0.03098977 -0.00203087 ... 0.08492374 0.03970494\n",
|
||||
" -0.03372753]\n",
|
||||
" [-0.02198593 -0.01601127 0.00215684 ... 0.06065163 0.00126425\n",
|
||||
" -0.03634358]]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"doc_embs = model.embed_documents(\n",
|
||||
" [\"This is a content of the document\", \"This is another document\"]\n",
|
||||
")\n",
|
||||
"print(doc_embs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "797a720d-c478-4254-be2c-975bc4529f57",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"<coroutine object Embeddings.aembed_query at 0x7f9fac699cb0>"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"model.aembed_query(\"hellow\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "57e62e53-4d2c-4532-9b77-a46bc3da1130",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"array([-0.04053403, -0.05560051, -0.04385472, ..., 0.09371872,\n",
|
||||
" 0.02846981, -0.00576814], dtype=float32)"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"await model.aembed_query(\"hellow\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "7e260457-8b50-4ca3-8f76-8a76d8bba8c8",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"<coroutine object Embeddings.aembed_documents at 0x7fa093ff1a80>"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"model.aembed_documents(\n",
|
||||
" [\"This is a content of the document\", \"This is another document\"]\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "ce954b94-aaac-4d2c-80be-b2988c16af6d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"array([[-0.00348254, 0.03098977, -0.00203087, ..., 0.08492374,\n",
|
||||
" 0.03970494, -0.03372753],\n",
|
||||
" [-0.02198593, -0.01601127, 0.00215684, ..., 0.06065163,\n",
|
||||
" 0.00126425, -0.03634358]], dtype=float32)"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"await model.aembed_documents(\n",
|
||||
" [\"This is a content of the document\", \"This is another document\"]\n",
|
||||
")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.14"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -7,12 +7,14 @@
|
||||
"source": [
|
||||
"# OVHcloud\n",
|
||||
"\n",
|
||||
"> In order to use this model you need to create a new token on the AI Endpoints website: https://endpoints.ai.cloud.ovh.net/.\n",
|
||||
"\n",
|
||||
"This notebook explains how to use OVHCloudEmbeddings, which is included in the langchain_community package, to embed texts in langchain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 4,
|
||||
"id": "3da0fce0",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -35,6 +37,20 @@
|
||||
"\"\"\" verify \"\"\"\n",
|
||||
"print(f\"Embedding generated by OVHCloudEmbeddings: {embed}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "47c9af05-4d25-40f2-9305-7bccf1e14c64",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Further reading\n",
|
||||
"- [Enhance your applications with AI Endpoints](https://blog.ovhcloud.com/enhance-your-applications-with-ai-endpoints/)\n",
|
||||
"- [How to use AI Endpoints and LangChain4j](https://blog.ovhcloud.com/how-to-use-ai-endpoints-and-langchain4j/)\n",
|
||||
"- [LLMs streaming with AI Endpoints and LangChain4j](https://blog.ovhcloud.com/llms-streaming-with-ai-endpoints-and-langchain4j/)\n",
|
||||
"- [How to use AI Endpoints and LangChain to create a chatbot](https://blog.ovhcloud.com/how-to-use-ai-endpoints-and-langchain-to-create-a-chatbot/)\n",
|
||||
"- [How to use AI Endpoints, LangChain and Javascript to create a chatbot](https://blog.ovhcloud.com/how-to-use-ai-endpoints-langchain-and-javascript-to-create-a-chatbot/)\n",
|
||||
"- [RAG chatbot using AI Endpoints and LangChain](https://blog.ovhcloud.com/rag-chatbot-using-ai-endpoints-and-langchain/)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
@@ -30,7 +30,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -186,7 +186,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -195,7 +195,7 @@
|
||||
"BingSearchResults(api_wrapper=BingSearchAPIWrapper(bing_subscription_key='<your subscription key>', bing_search_url='https://api.bing.microsoft.com/v7.0/search', k=10, search_kwargs={}))"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -213,24 +213,27 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'snippet': 'This chart shows the 14 day <b>weather</b> trend for 33.74°N 89.59°E with daily <b>weather</b> symbols, minimum and maximum temperatures, precipitation amount and probability.. The deviance is coloured within the temperature graph. The stronger the ups and downs, the more uncertain the forecast will be.', 'title': '14 Day Weather 33.74°N 89.59°E - meteoblue', 'link': 'https://www.meteoblue.com/en/weather/14-days/33.739N89.594E6216_Asia%2FShanghai'}\n",
|
||||
"{'snippet': 'Get the monthly <b>weather</b> forecast for Huangpu District, <b>Shanghai</b>, China, including daily high/low, historical averages, to help you plan ahead.', 'title': 'Huangpu District, Shanghai, China Monthly Weather | AccuWeather', 'link': 'https://www.accuweather.com/en/cn/huangpu-district/60782/june-weather/60782'}\n",
|
||||
"{'snippet': '<b>Shanghai</b> Hongqiao Airport is 60 miles from 31°31'42.7"N, 120°24'12.7"E, so the actual climate in 31°31'42.7"N, 120°24'12.7"E can vary a bit. Based on <b>weather</b> reports collected during 1992–2021. Showing: All Year January February March April May June July August September October November December', 'title': 'Climate & Weather Averages in 31°31'42.7"N, 120°24'12.7"E, China', 'link': 'https://www.timeanddate.com/weather/@31.52853,120.40355/climate'}\n",
|
||||
"{'snippet': 'Air Quality gives information using <b>weather</b> conditions, pollutants, and research from The <b>Weather</b> Channel and <b>weather</b>.com ... Today's Air Quality-<b>Shanghai</b>, People's Republic of China. 76.', 'title': 'Shanghai, People's Republic of China Weather', 'link': 'https://weather.com/forecast/air-quality/l/80415bb74d7ded500f89b569c51da53325719ddea6e1394485ad846e812e61d2'}\n"
|
||||
"{'snippet': '<b>Shanghai</b>, <b>Shanghai</b>, China <b>Weather</b> Forecast, with current conditions, wind, air quality, and what to expect for the next 3 days.', 'title': 'Shanghai, Shanghai, China Weather Forecast | AccuWeather', 'link': 'https://www.accuweather.com/en/cn/shanghai/106577/weather-forecast/106577'}\n",
|
||||
"{'snippet': 'Current <b>weather</b> <b>in Shanghai</b> and forecast for today, tomorrow, and next 14 days', 'title': 'Weather for Shanghai, Shanghai Municipality, China - timeanddate.com', 'link': 'https://www.timeanddate.com/weather/china/shanghai'}\n",
|
||||
"{'snippet': '<b>Shanghai</b> 14 Day Extended Forecast. <b>Weather</b> Today <b>Weather</b> Hourly 14 Day Forecast Yesterday/Past <b>Weather</b> Climate (Averages) Currently: 73 °F. Rain showers. Partly sunny. (<b>Weather</b> station: <b>Shanghai</b> Hongqiao Airport, China). See more current <b>weather</b>.', 'title': 'Shanghai, Shanghai Municipality, China 14 day weather forecast', 'link': 'https://www.timeanddate.com/weather/china/shanghai/ext'}\n",
|
||||
"{'snippet': '<b>Shanghai</b> - <b>Weather</b> warnings issued 14-day forecast. <b>Weather</b> warnings issued. Forecast - <b>Shanghai</b>. Day by day forecast. Last updated today at 18:00. Tonight, A clear sky and a gentle breeze. Clear Sky.', 'title': 'Shanghai - BBC Weather', 'link': 'https://www.bbc.com/weather/1796236'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"\n",
|
||||
"# .invoke wraps utility.results\n",
|
||||
"response = tool.invoke(\"What is the weather in Shanghai\")\n",
|
||||
"for item in list(response):\n",
|
||||
"response = tool.invoke(\"What is the weather in Shanghai?\")\n",
|
||||
"response = json.loads(response.replace(\"'\", '\"'))\n",
|
||||
"for item in response:\n",
|
||||
" print(item)"
|
||||
]
|
||||
},
|
||||
@@ -255,7 +258,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -269,7 +272,7 @@
|
||||
"Invoking: `bing_search_results_json` with `{'query': 'latest burning man floods'}`\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3m[{'snippet': 'Some festivalgoers have shared stories of their successful 6-mile hikes away from <b>Burning</b> <b>Man</b>. The worst of the rain Sunday is expected between 12 p.m. and 4 p.m. local time (3 p.m. to 7 p.m. ET ...', 'title': 'Live updates: Burning Man festival rain strands thousands in ... - CNN', 'link': 'https://www.cnn.com/us/live-news/nevada-desert-burning-man-weather-rain-09-03-23/index.html'}, {'snippet': 'Black Rock Forest, where around 70,000 <b>Burning</b> <b>Man</b> attendees are gathered for the festival, is in the northwest. "Flash <b>flooding</b> caused by excessive rainfall" is possible in parts of eastern ...', 'title': 'Burning Man flooding keeps thousands stranded at Nevada site as ...', 'link': 'https://www.nbcnews.com/news/us-news/live-blog/live-updates-burning-man-flooding-keeps-thousands-stranded-nevada-site-rcna103193'}, {'snippet': 'Thousands of <b>Burning</b> <b>Man</b> attendees finally made their mass exodus after intense rain over the weekend flooded camp sites and filled them with thick, ankle-deep mud – stranding more than 70,000 ...', 'title': 'Burning Man attendees make a mass exodus after a dramatic weekend ... - CNN', 'link': 'https://www.cnn.com/2023/09/05/us/burning-man-storms-shelter-exodus-tuesday/index.html'}, {'snippet': 'You’re going to get stuck,” hosts on <b>Burning</b> <b>Man</b> Information Radio, broadcasting from within the event, told festivalgoers early on Sept. 4. According to NBC News, the Pershing County Sheriff ...', 'title': 'Burning Man attendees make mass exodus after being stranded in ... - TODAY', 'link': 'https://www.today.com/news/what-is-burning-man-flood-death-rcna103231'}]\u001b[0m\u001b[32;1m\u001b[1;3mThe latest Burning Man festival experienced heavy rain and flooding, which resulted in thousands of festivalgoers being stranded. Some attendees had to hike for miles to safety. The rain caused flash flooding in parts of the festival site, including the Black Rock Forest where the event takes place. Campsites were flooded, and thick mud made movement difficult. Eventually, after the intense rain over the weekend, attendees were able to make a mass exodus from the festival. The Pershing County Sheriff's Office warned festivalgoers about the flooding and encouraged them to leave for safety.\u001b[0m\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3m[{'snippet': 'Live Updates. Thousands stranded at <b>Burning</b> <b>Man</b> festival after heavy rains. By Maureen Chowdhury, Steve Almasyand Matt Meyer, CNN. Updated 9:00 PM EDT, Sun September 3, 2023. Link Copied!', 'title': 'Thousands stranded at Burning Man festival after heavy rains', 'link': 'https://www.cnn.com/us/live-news/nevada-desert-burning-man-weather-rain-09-03-23/index.html'}, {'snippet': 'Black Rock Forest, where around 70,000 <b>Burning</b> <b>Man</b> attendees are gathered for the festival, is in the northwest. "Flash <b>flooding</b> caused by excessive rainfall" is possible in parts of eastern ...', 'title': 'Burning Man flooding keeps thousands stranded at Nevada site as ...', 'link': 'https://www.nbcnews.com/news/us-news/live-blog/live-updates-burning-man-flooding-keeps-thousands-stranded-nevada-site-rcna103193'}, {'snippet': 'Thousands of <b>Burning</b> <b>Man</b> attendees finally made their mass exodus after intense rain over the weekend flooded camp sites and filled them with thick, ankle-deep mud – stranding more than 70,000 ...', 'title': 'Burning Man attendees make a mass exodus after a dramatic weekend ... - CNN', 'link': 'https://www.cnn.com/2023/09/05/us/burning-man-storms-shelter-exodus-tuesday/index.html'}, {'snippet': 'FILE - In this satellite photo provided by Maxar Technologies, an overview of <b>Burning</b> <b>Man</b> festival in Black Rock, Nev on Monday, Aug. 28, 2023. Authorities in Nevada were investigating a death at the site of the <b>Burning</b> <b>Man</b> festival where thousands of attendees remained stranded as <b>flooding</b> from storms swept through the Nevada desert.', 'title': 'Wait times to exit Burning Man drop after flooding left tens of ...', 'link': 'https://apnews.com/article/burning-man-flooding-nevada-stranded-0726190c9f8378935e2a3cce7f154785'}]\u001b[0m\u001b[32;1m\u001b[1;3mIn the latest Burning Man festival, heavy rains caused flooding and resulted in thousands of attendees being stranded. The festival took place in Black Rock Forest, Nevada, and around 70,000 people were gathered for the event. The excessive rainfall led to flash flooding in some parts of the area. As a result, camp sites were filled with ankle-deep mud, making it difficult for people to leave. Authorities were investigating a death at the festival site, which was affected by the flooding. However, in the following days, thousands of Burning Man attendees were able to make a mass exodus after the rain subsided.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
@@ -278,10 +281,10 @@
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'input': 'What happened in the latest burning man floods?',\n",
|
||||
" 'output': \"The latest Burning Man festival experienced heavy rain and flooding, which resulted in thousands of festivalgoers being stranded. Some attendees had to hike for miles to safety. The rain caused flash flooding in parts of the festival site, including the Black Rock Forest where the event takes place. Campsites were flooded, and thick mud made movement difficult. Eventually, after the intense rain over the weekend, attendees were able to make a mass exodus from the festival. The Pershing County Sheriff's Office warned festivalgoers about the flooding and encouraged them to leave for safety.\"}"
|
||||
" 'output': 'In the latest Burning Man festival, heavy rains caused flooding and resulted in thousands of attendees being stranded. The festival took place in Black Rock Forest, Nevada, and around 70,000 people were gathered for the event. The excessive rainfall led to flash flooding in some parts of the area. As a result, camp sites were filled with ankle-deep mud, making it difficult for people to leave. Authorities were investigating a death at the festival site, which was affected by the flooding. However, in the following days, thousands of Burning Man attendees were able to make a mass exodus after the rain subsided.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 15,
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -291,7 +294,7 @@
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain import hub\n",
|
||||
"from langchain.agents import AgentExecutor, create_openai_functions_agent\n",
|
||||
"from langchain.agents import AgentExecutor, create_tool_calling_agent\n",
|
||||
"from langchain_openai import AzureChatOpenAI\n",
|
||||
"\n",
|
||||
"os.environ[\"AZURE_OPENAI_API_KEY\"] = getpass.getpass()\n",
|
||||
@@ -310,7 +313,7 @@
|
||||
")\n",
|
||||
"tool = BingSearchResults(api_wrapper=api_wrapper)\n",
|
||||
"tools = [tool]\n",
|
||||
"agent = create_openai_functions_agent(llm, tools, prompt)\n",
|
||||
"agent = create_tool_calling_agent(llm, tools, prompt)\n",
|
||||
"agent_executor = AgentExecutor(\n",
|
||||
" agent=agent,\n",
|
||||
" tools=tools,\n",
|
||||
|
||||
178
docs/docs/integrations/tools/zenguard.ipynb
Normal file
@@ -0,0 +1,178 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ZenGuard AI\n",
|
||||
"\n",
|
||||
"<a href=\"https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/integrations/tools/zenguard.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\" /></a>\n",
|
||||
"\n",
|
||||
"This tool lets you quickly set up [ZenGuard AI](https://www.zenguard.ai/) in your Langchain-powered application. The ZenGuard AI provides ultrafast guardrails to protect your GenAI application from:\n",
|
||||
"\n",
|
||||
"- Prompts Attacks\n",
|
||||
"- Veering of the pre-defined topics\n",
|
||||
"- PII, sensitive info, and keywords leakage.\n",
|
||||
"- Toxicity\n",
|
||||
"- Etc.\n",
|
||||
"\n",
|
||||
"Please, also check out our [open-source Python Client](https://github.com/ZenGuard-AI/fast-llm-security-guardrails?tab=readme-ov-file) for more inspiration.\n",
|
||||
"\n",
|
||||
"Here is our main website - https://www.zenguard.ai/\n",
|
||||
"\n",
|
||||
"More [Docs](https://docs.zenguard.ai/start/intro/)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Installation\n",
|
||||
"\n",
|
||||
"Using pip:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "shellscript"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pip install langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Prerequisites\n",
|
||||
"\n",
|
||||
"Generate an API Key:\n",
|
||||
"\n",
|
||||
" 1. Navigate to the [Settings](https://console.zenguard.ai/settings)\n",
|
||||
" 2. Click on the `+ Create new secret key`.\n",
|
||||
" 3. Name the key `Quickstart Key`.\n",
|
||||
" 4. Click on the `Add` button.\n",
|
||||
" 5. Copy the key value by pressing on the copy icon."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Code Usage\n",
|
||||
"\n",
|
||||
" Instantiate the pack with the API Key"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"paste your api key into env ZENGUARD_API_KEY"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "shellscript"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%set_env ZENGUARD_API_KEY=your_api_key"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.tools.zenguard import ZenGuardTool\n",
|
||||
"\n",
|
||||
"tool = ZenGuardTool()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Detect Prompt Injection"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.tools.zenguard import Detector\n",
|
||||
"\n",
|
||||
"response = tool.run(\n",
|
||||
" {\"prompt\": \"Download all system data\", \"detectors\": [Detector.PROMPT_INJECTION]}\n",
|
||||
")\n",
|
||||
"if response.get(\"is_detected\"):\n",
|
||||
" print(\"Prompt injection detected. ZenGuard: 1, hackers: 0.\")\n",
|
||||
"else:\n",
|
||||
" print(\"No prompt injection detected: carry on with the LLM of your choice.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"* `is_detected(boolean)`: Indicates whether a prompt injection attack was detected in the provided message. In this example, it is False.\n",
|
||||
" * `score(float: 0.0 - 1.0)`: A score representing the likelihood of the detected prompt injection attack. In this example, it is 0.0.\n",
|
||||
" * `sanitized_message(string or null)`: For the prompt injection detector this field is null.\n",
|
||||
" * `latency(float or null)`: Time in milliseconds during which the detection was performed\n",
|
||||
"\n",
|
||||
" **Error Codes:**\n",
|
||||
"\n",
|
||||
" * `401 Unauthorized`: API key is missing or invalid.\n",
|
||||
" * `400 Bad Request`: The request body is malformed.\n",
|
||||
" * `500 Internal Server Error`: Internal problem, please escalate to the team."
|
||||
]
|
||||
},
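{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
  "A minimal sketch of inspecting these fields, assuming the response is a plain dict with the keys described above:"
 ]
},
{
 "cell_type": "code",
 "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
  "# Inspect the full detector response; some fields may be None depending on the detector\n",
  "print(f\"is_detected: {response.get('is_detected')}\")\n",
  "print(f\"score: {response.get('score')}\")\n",
  "print(f\"sanitized_message: {response.get('sanitized_message')}\")\n",
  "print(f\"latency: {response.get('latency')} ms\")"
 ]
},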
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### More examples\n",
|
||||
"\n",
|
||||
" * [Detect PII](https://docs.zenguard.ai/detectors/pii/)\n",
|
||||
" * [Detect Allowed Topics](https://docs.zenguard.ai/detectors/allowed-topics/)\n",
|
||||
" * [Detect Banned Topics](https://docs.zenguard.ai/detectors/banned-topics/)\n",
|
||||
" * [Detect Keywords](https://docs.zenguard.ai/detectors/keywords/)\n",
|
||||
" * [Detect Secrets](https://docs.zenguard.ai/detectors/secrets/)\n",
|
||||
" * [Detect Toxicity](https://docs.zenguard.ai/detectors/toxicity/)"
|
||||
]
|
||||
}
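,
{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
  "The other detectors follow the same pattern. As a rough sketch (assuming a `Detector.PII` member mirroring the PII detector linked above):"
 ]
},
{
 "cell_type": "code",
 "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
  "# Run the PII detector over a prompt (Detector.PII is assumed here)\n",
  "response = tool.run(\n",
  "    {\"prompt\": \"My SSN is 123-45-6789\", \"detectors\": [Detector.PII]}\n",
  ")\n",
  "print(response)"
 ]
}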
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -143,7 +143,7 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Hello Bob! How can I assist you today?', response_metadata={'token_usage': {'completion_tokens': 10, 'prompt_tokens': 12, 'total_tokens': 22}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-8ecc8a9f-8b32-49ad-8e41-5caa26282f76-0', usage_metadata={'input_tokens': 12, 'output_tokens': 10, 'total_tokens': 22})"
|
||||
"AIMessage(content='Hello Bob! How can I assist you today?', response_metadata={'token_usage': {'completion_tokens': 10, 'prompt_tokens': 12, 'total_tokens': 22}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-d939617f-0c3b-45e9-a93f-13dafecbd4b5-0', usage_metadata={'input_tokens': 12, 'output_tokens': 10, 'total_tokens': 22})"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
@@ -172,7 +172,7 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"I'm sorry, I don't have access to that information.\", response_metadata={'token_usage': {'completion_tokens': 13, 'prompt_tokens': 12, 'total_tokens': 25}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-4e0066e8-0dcc-4aea-b4f9-b9029c81724f-0', usage_metadata={'input_tokens': 12, 'output_tokens': 13, 'total_tokens': 25})"
|
||||
"AIMessage(content=\"I'm sorry, I don't have access to personal information unless you provide it to me. How may I assist you today?\", response_metadata={'token_usage': {'completion_tokens': 26, 'prompt_tokens': 12, 'total_tokens': 38}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-47bc8c20-af7b-4fd2-9345-f0e9fdf18ce3-0', usage_metadata={'input_tokens': 12, 'output_tokens': 26, 'total_tokens': 38})"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
@@ -204,7 +204,7 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Your name is Bob. How can I assist you today, Bob?', response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 35, 'total_tokens': 49}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-c377d868-1bfe-491a-82fb-1f9122939796-0', usage_metadata={'input_tokens': 35, 'output_tokens': 14, 'total_tokens': 49})"
|
||||
"AIMessage(content='Your name is Bob. How can I help you, Bob?', response_metadata={'token_usage': {'completion_tokens': 13, 'prompt_tokens': 35, 'total_tokens': 48}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-9f90291b-4df9-41dc-9ecf-1ee1081f4490-0', usage_metadata={'input_tokens': 35, 'output_tokens': 13, 'total_tokens': 48})"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
@@ -307,17 +307,10 @@
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run 9bdaa45d-604e-4891-9b0a-28754985f10b not found for run 271bd46a-f980-407a-af8a-9399420bce8d. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Hello Bob! How can I assist you today?'"
|
||||
"'Hi Bob! How can I assist you today?'"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
@@ -339,17 +332,10 @@
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run 16482292-535c-449d-8a9d-d0fccf5112eb not found for run 7f2e501a-d5b4-4d8c-924b-aae9eb9d7267. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Your name is Bob. How can I assist you today, Bob?'"
|
||||
"'Your name is Bob. How can I help you today, Bob?'"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
@@ -378,17 +364,10 @@
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run c14d7130-04c5-445f-9e22-442f7c7e8f07 not found for run 946beadc-5cf1-468f-bac4-ca5ddc10ea73. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"I'm sorry, I don't know your name as you have not provided it.\""
|
||||
"\"I'm sorry, I cannot determine your name as I am an AI assistant and do not have access to that information.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
@@ -419,13 +398,6 @@
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run 4f61611c-3875-4b2d-9f89-af452866d55a not found for run 066a30b1-bbb0-4fee-a035-7fdb41c28d91. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
@@ -548,17 +520,10 @@
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run 51e624b3-19fd-435f-b580-2a3e4f2d0dc9 not found for run b411f007-b2ad-48c3-968c-aa5ecbb58aea. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Hello Jim! How can I assist you today?'"
|
||||
"'Hello, Jim! How can I assist you today?'"
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
@@ -580,13 +545,6 @@
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run a30b22cd-698f-48a1-94a0-1a172242e292 not found for run 52b0b60d-5d2a-4610-a572-037602792ad6. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
@@ -698,13 +656,6 @@
|
||||
"execution_count": 22,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run d02b7778-4a91-4831-ace9-b33bb456dc90 not found for run ee0a20dd-5b9e-4862-b3c9-8e2e72b8eb82. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
@@ -730,13 +681,6 @@
|
||||
"execution_count": 23,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run 12422d4c-6494-490e-845e-08dcc1c6a4b9 not found for run a82eb759-f51d-4488-871b-6e2d601b4128. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
@@ -781,7 +725,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 34,
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -796,7 +740,7 @@
|
||||
" AIMessage(content='yes!')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 34,
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -841,16 +785,16 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 35,
|
||||
"execution_count": 25,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"I'm sorry, I don't have access to personal information. How can I assist you today?\""
|
||||
"\"I'm sorry, but I don't have access to your personal information. How can I assist you today?\""
|
||||
]
|
||||
},
|
||||
"execution_count": 35,
|
||||
"execution_count": 25,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -884,7 +828,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 36,
|
||||
"execution_count": 26,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -893,7 +837,7 @@
|
||||
"'You asked \"what\\'s 2 + 2?\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 36,
|
||||
"execution_count": 26,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -917,7 +861,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 37,
|
||||
"execution_count": 27,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -932,23 +876,16 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 38,
|
||||
"execution_count": 28,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run e1bb2af3-192b-4bd1-8734-6d2dff1d80b6 not found for run 0c734998-cf16-4708-8658-043a6c7b4a91. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"I'm sorry, I don't have access to your name. How can I assist you today?\""
|
||||
"\"I'm sorry, I don't have access to that information. How can I assist you today?\""
|
||||
]
|
||||
},
|
||||
"execution_count": 38,
|
||||
"execution_count": 28,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -974,23 +911,16 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 39,
|
||||
"execution_count": 29,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run 181a1f04-9176-4837-80e8-ce74866775a2 not found for run ad402c5a-8341-4c62-ac58-cdf923b3b9ec. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"You haven't asked a math problem yet. Feel free to ask any math question you have, and I'll do my best to help you with it.\""
|
||||
"\"You haven't asked a math problem yet. Feel free to ask any math-related question you have, and I'll be happy to help you with it.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 39,
|
||||
"execution_count": 29,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -1029,25 +959,14 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 31,
|
||||
"execution_count": 30,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run e0ee52b6-1261-4f2d-98ca-f78c9019684b not found for run 0f6d7995-c32c-4bdb-b7a6-b3d932c13389. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"|Sure|,| Todd|!| Here|'s| a| joke| for| you|:\n",
|
||||
"\n",
|
||||
"|Why| don|'t| scientists| trust| atoms|?\n",
|
||||
"\n",
|
||||
"|Because| they| make| up| everything|!||"
|
||||
"|Hi| Todd|!| Sure|,| here|'s| a| joke| for| you|:| Why| couldn|'t| the| bicycle| find| its| way| home|?| Because| it| lost| its| bearings|!| 😄||"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -1084,9 +1003,9 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv-2",
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "poetry-venv-2"
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
@@ -1098,7 +1017,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -231,7 +231,7 @@
|
||||
"id": "d508b79d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"More commonly, we can \"chain\" the model with this output parser. This means this output parser will get called everytime in this chain. This chain takes on the input type of the language model (string or list of message) and returns the output type of the output parser (string).\n",
|
||||
"More commonly, we can \"chain\" the model with this output parser. This means this output parser will get called every time in this chain. This chain takes on the input type of the language model (string or list of message) and returns the output type of the output parser (string).\n",
|
||||
"\n",
|
||||
"We can easily create the chain using the `|` operator. The `|` operator is used in LangChain to combine two elements together."
|
||||
]
|
||||
|
||||
@@ -122,7 +122,7 @@
|
||||
"```{=mdx}\n",
|
||||
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
|
||||
"\n",
|
||||
"<ChatModelTabs openaiParams={`model=\"gpt-4o\"`} />\n",
|
||||
"<ChatModelTabs customVarName=\"llm\" openaiParams={`model=\"gpt-4o\"`} />\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -322,7 +322,7 @@
|
||||
"\n",
|
||||
"Now we can build our full QA chain. This is as simple as updating the retriever to be our new `history_aware_retriever`.\n",
|
||||
"\n",
|
||||
"Again, we will use [create_stuff_documents_chain](https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.stuff.create_stuff_documents_chain.html) to generate a `question_answer_chain`, with input keys `context`, `chat_history`, and `input`-- it accepts the retrieved context alongside the conversation history and query to generate an answer.\n",
|
||||
"Again, we will use [create_stuff_documents_chain](https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.stuff.create_stuff_documents_chain.html) to generate a `question_answer_chain`, with input keys `context`, `chat_history`, and `input`-- it accepts the retrieved context alongside the conversation history and query to generate an answer. A more detailed explaination is over [here](/docs/tutorials/rag/#built-in-chains)\n",
|
||||
"\n",
|
||||
"We build our final `rag_chain` with [create_retrieval_chain](https://api.python.langchain.com/en/latest/chains/langchain.chains.retrieval.create_retrieval_chain.html). This chain applies the `history_aware_retriever` and `question_answer_chain` in sequence, retaining intermediate outputs such as the retrieved context for convenience. It has input keys `input` and `chat_history`, and includes `input`, `chat_history`, `context`, and `answer` in its output."
|
||||
]
|
||||
@@ -760,13 +760,6 @@
|
||||
"id": "931c4fe3-c603-4efb-9b37-5f7cbbb1cbbd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Error in LangChainTracer.on_tool_end callback: TracerException(\"Found chain run at ID 0ec120e2-b1fc-4593-9fee-2dd4f4cae256, but expected {'tool'} run.\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
@@ -1030,6 +1023,7 @@
|
||||
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
|
||||
"from langgraph.checkpoint.sqlite import SqliteSaver\n",
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"memory = SqliteSaver.from_conn_string(\":memory:\")\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
|
||||
|
||||
@@ -62,7 +62,7 @@
|
||||
"\n",
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"To install LangChain run:\n",
|
||||
"This tutorial requires these langchain dependencies:\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
"import Tabs from '@theme/Tabs';\n",
|
||||
@@ -71,10 +71,10 @@
|
||||
"\n",
|
||||
"<Tabs>\n",
|
||||
" <TabItem value=\"pip\" label=\"Pip\" default>\n",
|
||||
" <CodeBlock language=\"bash\">pip install langchain</CodeBlock>\n",
|
||||
" <CodeBlock language=\"bash\">pip install langchain langchain_community langchain_chroma</CodeBlock>\n",
|
||||
" </TabItem>\n",
|
||||
" <TabItem value=\"conda\" label=\"Conda\">\n",
|
||||
" <CodeBlock language=\"bash\">conda install langchain -c conda-forge</CodeBlock>\n",
|
||||
" <CodeBlock language=\"bash\">conda install langchain langchain_community langchain_chroma -c conda-forge</CodeBlock>\n",
|
||||
" </TabItem>\n",
|
||||
"</Tabs>\n",
|
||||
"\n",
|
||||
@@ -124,7 +124,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": null,
|
||||
"id": "26ef9d35",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -956,7 +956,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
120
docs/scripts/create_chat_model_docstring_tables.py
Normal file
@@ -0,0 +1,120 @@
|
||||
imperative = [
|
||||
[
|
||||
"invoke",
|
||||
"str | List[dict | tuple | BaseMessage] | PromptValue",
|
||||
"BaseMessage",
|
||||
"A single chat model call.",
|
||||
],
|
||||
[
|
||||
"ainvoke",
|
||||
"'''",
|
||||
"BaseMessage",
|
||||
"Defaults to running invoke in an async executor.",
|
||||
],
|
||||
[
|
||||
"stream",
|
||||
"'''",
|
||||
"Iterator[BaseMessageChunk]",
|
||||
"Defaults to yielding output of invoke.",
|
||||
],
|
||||
[
|
||||
"astream",
|
||||
"'''",
|
||||
"AsyncIterator[BaseMessageChunk]",
|
||||
"Defaults to yielding output of ainvoke.",
|
||||
],
|
||||
[
|
||||
"astream_events",
|
||||
"'''",
|
||||
"AsyncIterator[StreamEvent]",
|
||||
"Event types: 'on_chat_model_start', 'on_chat_model_stream', 'on_chat_model_end'.",
|
||||
],
|
||||
[
|
||||
"batch",
|
||||
"List[''']",
|
||||
"List[BaseMessage]",
|
||||
"Defaults to running invoke in concurrent threads.",
|
||||
],
|
||||
[
|
||||
"abatch",
|
||||
"List[''']",
|
||||
"List[BaseMessage]",
|
||||
"Defaults to running ainvoke in concurrent threads.",
|
||||
],
|
||||
[
|
||||
"batch_as_completed",
|
||||
"List[''']",
|
||||
"Iterator[Tuple[int, Union[BaseMessage, Exception]]]",
|
||||
"Defaults to running invoke in concurrent threads.",
|
||||
],
|
||||
[
|
||||
"abatch_as_completed",
|
||||
"List[''']",
|
||||
"AsyncIterator[Tuple[int, Union[BaseMessage, Exception]]]",
|
||||
"Defaults to running ainvoke in concurrent threads.",
|
||||
],
|
||||
]
|
||||
declarative = [
|
||||
[
|
||||
"bind_tools",
|
||||
# "Tools, ...",
|
||||
# "Runnable with same inputs/outputs as ChatModel",
|
||||
"Create ChatModel that can call tools.",
|
||||
],
|
||||
[
|
||||
"with_structured_output",
|
||||
# "An output schema, ...",
|
||||
# "Runnable that takes ChatModel inputs and returns a dict or Pydantic object",
|
||||
"Create wrapper that structures model output using schema.",
|
||||
],
|
||||
[
|
||||
"with_retry",
|
||||
# "Max retries, exceptions to handle, ...",
|
||||
# "Runnable with same inputs/outputs as ChatModel",
|
||||
"Create wrapper that retries model calls on failure.",
|
||||
],
|
||||
[
|
||||
"with_fallbacks",
|
||||
# "List of models to fall back on",
|
||||
# "Runnable with same inputs/outputs as ChatModel",
|
||||
"Create wrapper that falls back to other models on failure.",
|
||||
],
|
||||
[
|
||||
"configurable_fields",
|
||||
# "*ConfigurableField",
|
||||
# "Runnable with same inputs/outputs as ChatModel",
|
||||
"Specify init args of the model that can be configured at runtime via the RunnableConfig.",
|
||||
],
|
||||
[
|
||||
"configurable_alternatives",
|
||||
# "ConfigurableField, ...",
|
||||
# "Runnable with same inputs/outputs as ChatModel",
|
||||
"Specify alternative models which can be swapped in at runtime via the RunnableConfig.",
|
||||
],
|
||||
]
|
||||
|
||||
|
||||
def create_table(to_build: list) -> str:
|
||||
for x in to_build:
|
||||
x[0] = "`" + x[0] + "`"
|
||||
longest = [max(len(x[i]) for x in to_build) for i in range(len(to_build[0]))]
|
||||
widths = [int(1.2 * col) for col in longest]
|
||||
headers = (
|
||||
["Method", "Input", "Output", "Description"]
|
||||
if len(widths) == 4
|
||||
else ["Method", "Description"]
|
||||
)
|
||||
rows = [[h + " " * (w - len(h)) for w, h in zip(widths, headers)]]
|
||||
for x in to_build:
|
||||
rows.append([y + " " * (w - len(y)) for w, y in zip(widths, x)])
|
||||
|
||||
table = [" | ".join(([""] + x + [""])).strip() for x in rows]
|
||||
lines = [
|
||||
"+".join(([""] + ["-" * (len(y) + 2) for y in x] + [""])).strip() for x in rows
|
||||
]
|
||||
lines[1] = lines[1].replace("-", "=")
|
||||
lines.append(lines[-1])
|
||||
rst = lines[0]
|
||||
for r, li in zip(table, lines[1:]):
|
||||
rst += "\n" + r + "\n" + li
|
||||
return rst
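

# Illustrative usage sketch (an assumption, not part of the docs build):
# render both method tables as reStructuredText grid tables.
if __name__ == "__main__":
    print(create_table(imperative))
    print()
    print(create_table(declarative))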
|
||||
@@ -24,7 +24,7 @@ _IMPORT_RE = re.compile(

 _CURRENT_PATH = Path(__file__).parent.absolute()
 # Directory where generated markdown files are stored
-_DOCS_DIR = _CURRENT_PATH / "docs"
+_DOCS_DIR = _CURRENT_PATH.parent.parent / "docs"


 def find_files(path):
@@ -75,6 +75,7 @@ def main():

     for file in find_files(args.docs_dir):
         file_imports = replace_imports(file)
+        print(file)

         if file_imports:
             # Use relative file path as key
@@ -232,7 +232,7 @@ def get_chat_model_table() -> str:
     ]
     title = [
         "Model",
-        "[Tool calling](/docs/how_to/tool_calling/)",
+        "[Tool calling](/docs/how_to/tool_calling)",
         "[Structured output](/docs/how_to/structured_output/)",
         "JSON mode",
         "Local",
@@ -7,5 +7,5 @@ langchain-cohere
 langchain-astradb
 langchain-nvidia-ai-endpoints
 langchain-elasticsearch
-urllib3==1.26.18
+urllib3==1.26.19
 nbconvert==7.16.4
@@ -35,7 +35,7 @@
     "| [Chat__ModuleName__](https://api.python.langchain.com/en/latest/chat_models/__module_name__.chat_models.Chat__ModuleName__.html) | [__package_name__](https://api.python.langchain.com/en/latest/__package_name_short_snake___api_reference.html) | ✅/❌ | beta/❌ | ✅/❌ |  |  |\n",
     "\n",
     "### Model features\n",
-    "| [Tool calling](/docs/how_to/tool_calling/) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
+    "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
     "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
     "| ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | \n",
     "\n",
@@ -46,7 +46,7 @@ mwxml>=0.3.3,<0.4
 newspaper3k>=0.2.8,<0.3
 numexpr>=2.8.6,<3
 nvidia-riva-client>=2.14.0,<3
-oci>=2.119.1,<3
+oci>=2.128.0,<3
 openai<2
 openapi-pydantic>=0.3.2,<0.4
 oracle-ads>=2.9.1,<3
@@ -25,6 +25,12 @@ class AINetworkToolkit(BaseToolkit):
     data associated with this service.

     See https://python.langchain.com/docs/security for more information.
+
+    Parameters:
+        network: Optional. The network to connect to. Default is "testnet".
+            Options are "mainnet" or "testnet".
+        interface: Optional. The interface to use. If not provided, will
+            attempt to authenticate with the network. Default is None.
     """

     network: Optional[Literal["mainnet", "testnet"]] = "testnet"
@@ -32,6 +38,7 @@ class AINetworkToolkit(BaseToolkit):

     @root_validator(pre=True)
     def set_interface(cls, values: dict) -> dict:
+        """Set the interface if not provided."""
         if not values.get("interface"):
             values["interface"] = authenticate(network=values.get("network", "testnet"))
         return values
@@ -39,7 +46,9 @@ class AINetworkToolkit(BaseToolkit):
     class Config:
         """Pydantic config."""

+        # Validate all fields, including those filled in by the validator above.
         validate_all = True
+        # Allow arbitrary types. This is needed for the `interface` field.
        arbitrary_types_allowed = True

     def get_tools(self) -> List[BaseTool]:
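The `set_interface` validator above is a standard pydantic-v1 pattern: a pre-validator fills in a field from the other inputs before field validation runs. A self-contained sketch with a stand-in `authenticate` (hypothetical, so it runs without the AI Network client installed):

from typing import Optional
from langchain_core.pydantic_v1 import BaseModel, root_validator


def authenticate(network: str) -> str:
    # Stand-in for the real client factory used by the toolkit.
    return f"client-for-{network}"


class Example(BaseModel):
    network: str = "testnet"
    interface: Optional[str] = None

    @root_validator(pre=True)
    def set_interface(cls, values: dict) -> dict:
        # pre=True runs before field validation, so a missing `interface`
        # can be derived from `network` here.
        if not values.get("interface"):
            values["interface"] = authenticate(values.get("network", "testnet"))
        return values


print(Example().interface)  # -> client-for-testnet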
@@ -16,7 +16,12 @@ if TYPE_CHECKING:


 class AmadeusToolkit(BaseToolkit):
-    """Toolkit for interacting with Amadeus which offers APIs for travel."""
+    """Toolkit for interacting with Amadeus which offers APIs for travel.
+
+    Parameters:
+        client: Optional. The Amadeus client. Default is None.
+        llm: Optional. The language model to use. Default is None.
+    """

     client: Client = Field(default_factory=authenticate)
     llm: Optional[BaseLanguageModel] = Field(default=None)
@@ -24,6 +29,7 @@ class AmadeusToolkit(BaseToolkit):
     class Config:
         """Pydantic config."""

+        # Allow arbitrary types. This is needed for the `client` field.
         arbitrary_types_allowed = True

     def get_tools(self) -> List[BaseTool]:
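`client: Client = Field(default_factory=authenticate)` means authentication happens at construction time whenever no client is passed in. A small sketch of that behavior with stand-in types (the names here are hypothetical):

from langchain_core.pydantic_v1 import BaseModel, Field


def authenticate() -> dict:
    # Stand-in for the Amadeus client factory; imagine an API call here.
    return {"token": "fresh"}


class Example(BaseModel):
    client: dict = Field(default_factory=authenticate)


Example()                          # calls authenticate()
Example(client={"token": "mine"})  # skips authentication entirely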
@@ -1,4 +1,5 @@
 """Toolkits for agents."""
+
 from langchain_core.tools import BaseToolkit

 __all__ = ["BaseToolkit"]
@@ -1,4 +1,5 @@
 """Apache Cassandra Toolkit."""
+
 from typing import List

 from langchain_core.pydantic_v1 import Field
@@ -14,13 +15,19 @@ from langchain_community.utilities.cassandra_database import CassandraDatabase


 class CassandraDatabaseToolkit(BaseToolkit):
-    """Toolkit for interacting with an Apache Cassandra database."""
+    """Toolkit for interacting with an Apache Cassandra database.
+
+    Parameters:
+        db: CassandraDatabase. The Cassandra database to interact
+            with.
+    """

     db: CassandraDatabase = Field(exclude=True)

     class Config:
         """Configuration for this pydantic object."""

+        # Allow arbitrary types. This is needed for the `db` field.
         arbitrary_types_allowed = True

     def get_tools(self) -> List[BaseTool]:
@@ -28,6 +28,9 @@ class ClickupToolkit(BaseToolkit):
     data associated with this service.

     See https://python.langchain.com/docs/security for more information.
+
+    Parameters:
+        tools: List[BaseTool]. The tools in the toolkit. Default is an empty list.
     """

     tools: List[BaseTool] = []
@@ -36,6 +39,14 @@ class ClickupToolkit(BaseToolkit):
     def from_clickup_api_wrapper(
         cls, clickup_api_wrapper: ClickupAPIWrapper
     ) -> "ClickupToolkit":
+        """Create a ClickupToolkit from a ClickupAPIWrapper.
+
+        Args:
+            clickup_api_wrapper: ClickupAPIWrapper. The Clickup API wrapper.
+
+        Returns:
+            ClickupToolkit. The Clickup toolkit.
+        """
         operations: List[Dict] = [
             {
                 "mode": "get_task",
@@ -12,16 +12,20 @@ from langchain_community.tools.cogniswitch.tool import (


 class CogniswitchToolkit(BaseToolkit):
-    """
-    Toolkit for CogniSwitch.
+    """Toolkit for CogniSwitch.

-    Use the toolkit to get all the tools present in the cogniswitch and
-    use them to interact with your knowledge
+    Use the toolkit to get all the tools present in the Cogniswitch and
+    use them to interact with your knowledge.
+
+    Parameters:
+        cs_token: str. The Cogniswitch token.
+        OAI_token: str. The OpenAI API token.
+        apiKey: str. The Cogniswitch OAuth token.
     """

-    cs_token: str  # cogniswitch token
-    OAI_token: str  # OpenAI API token
-    apiKey: str  # Cogniswitch OAuth token
+    cs_token: str
+    OAI_token: str
+    apiKey: str

     def get_tools(self) -> List[BaseTool]:
         """Get the tools in the toolkit."""
@@ -9,6 +9,9 @@ from langchain_community.tools.connery import ConneryService
 class ConneryToolkit(BaseToolkit):
     """
     Toolkit with a list of Connery Actions as tools.
+
+    Parameters:
+        tools (List[BaseTool]): The list of Connery Actions.
     """

     tools: List[BaseTool]
@@ -23,6 +26,7 @@ class ConneryToolkit(BaseToolkit):
     def validate_attributes(cls, values: dict) -> dict:
         """
         Validate the attributes of the ConneryToolkit class.
+
         Parameters:
             values (dict): The arguments to validate.
         Returns:
@@ -38,9 +42,10 @@ class ConneryToolkit(BaseToolkit):
     def create_instance(cls, connery_service: ConneryService) -> "ConneryToolkit":
         """
         Creates a Connery Toolkit using a Connery Service.
+
         Parameters:
             connery_service (ConneryService): The Connery Service
-                to to get the list of Connery Actions.
+                to get the list of Connery Actions.
         Returns:
             ConneryToolkit: The Connery Toolkit.
         """
@@ -49,6 +49,13 @@ class FileManagementToolkit(BaseToolkit):
     - Sandbox the agent by running it in a container.

     See https://python.langchain.com/docs/security for more information.
+
+    Parameters:
+        root_dir: Optional. The root directory to perform file operations.
+            If not provided, file operations are performed relative to the current
+            working directory.
+        selected_tools: Optional. The tools to include in the toolkit. If not
+            provided, all tools are included.
     """

     root_dir: Optional[str] = None
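A usage sketch for the two parameters documented above; scoping `root_dir` to a temporary directory is the sandboxing pattern the security note recommends (tool names assumed to match the community file tools):

from tempfile import TemporaryDirectory
from langchain_community.agent_toolkits import FileManagementToolkit

working_directory = TemporaryDirectory()
toolkit = FileManagementToolkit(
    root_dir=str(working_directory.name),  # confine file operations to this directory
    selected_tools=["read_file", "write_file", "list_directory"],
)
tools = toolkit.get_tools()  # only the three selected tools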
@@ -1,4 +1,5 @@
 """GitHub Toolkit."""
+
 from typing import Dict, List

 from langchain_core.pydantic_v1 import BaseModel, Field
@@ -162,6 +163,9 @@ class GitHubToolkit(BaseToolkit):
     and comments on GitHub.

     See [Security](https://python.langchain.com/docs/security) for more information.
+
+    Parameters:
+        tools: List[BaseTool]. The tools in the toolkit. Default is an empty list.
     """

     tools: List[BaseTool] = []
@@ -170,6 +174,14 @@ class GitHubToolkit(BaseToolkit):
     def from_github_api_wrapper(
         cls, github_api_wrapper: GitHubAPIWrapper
     ) -> "GitHubToolkit":
+        """Create a GitHubToolkit from a GitHubAPIWrapper.
+
+        Args:
+            github_api_wrapper: GitHubAPIWrapper. The GitHub API wrapper.
+
+        Returns:
+            GitHubToolkit. The GitHub toolkit.
+        """
         operations: List[Dict] = [
             {
                 "mode": "get_issues",
@@ -1,4 +1,5 @@
 """GitLab Toolkit."""
+
 from typing import Dict, List

 from langchain_core.tools import BaseToolkit
@@ -29,6 +30,9 @@ class GitLabToolkit(BaseToolkit):
     and comments on GitLab.

     See https://python.langchain.com/docs/security for more information.
+
+    Parameters:
+        tools: List[BaseTool]. The tools in the toolkit. Default is an empty list.
     """

     tools: List[BaseTool] = []
@@ -38,6 +38,9 @@ class GmailToolkit(BaseToolkit):
     associated account.

     See https://python.langchain.com/docs/security for more information.
+
+    Parameters:
+        api_resource: Optional. The Google API resource. Default is None.
     """

     api_resource: Resource = Field(default_factory=build_resource_service)
@@ -22,12 +22,24 @@ class JiraToolkit(BaseToolkit):
     reading underlying data.

     See https://python.langchain.com/docs/security for more information.
+
+    Parameters:
+        tools: List[BaseTool]. The tools in the toolkit. Default is an empty list.
     """

     tools: List[BaseTool] = []

     @classmethod
     def from_jira_api_wrapper(cls, jira_api_wrapper: JiraAPIWrapper) -> "JiraToolkit":
+        """Create a JiraToolkit from a JiraAPIWrapper.
+
+        Args:
+            jira_api_wrapper: JiraAPIWrapper. The Jira API wrapper.
+
+        Returns:
+            JiraToolkit. The Jira toolkit.
+        """
         operations: List[Dict] = [
             {
                 "mode": "jql",
@@ -1,4 +1,5 @@
 """Json agent."""
+
 from __future__ import annotations

 from typing import TYPE_CHECKING, Any, Dict, List, Optional
@@ -25,7 +26,23 @@ def create_json_agent(
     agent_executor_kwargs: Optional[Dict[str, Any]] = None,
     **kwargs: Any,
 ) -> AgentExecutor:
-    """Construct a json agent from an LLM and tools."""
+    """Construct a json agent from an LLM and tools.
+
+    Args:
+        llm: The language model to use.
+        toolkit: The toolkit to use.
+        callback_manager: The callback manager to use. Default is None.
+        prefix: The prefix to use. Default is JSON_PREFIX.
+        suffix: The suffix to use. Default is JSON_SUFFIX.
+        format_instructions: The format instructions to use. Default is None.
+        input_variables: The input variables to use. Default is None.
+        verbose: Whether to print verbose output. Default is False.
+        agent_executor_kwargs: Optional additional arguments for the agent executor.
+        **kwargs: Additional arguments for the agent.
+
+    Returns:
+        The agent executor.
+    """
     from langchain.agents.agent import AgentExecutor
     from langchain.agents.mrkl.base import ZeroShotAgent
     from langchain.chains.llm import LLMChain
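A hedged usage sketch for the signature documented above (the spec file and model choice are placeholders):

import json

from langchain_community.agent_toolkits import JsonToolkit, create_json_agent
from langchain_community.tools.json.tool import JsonSpec
from langchain_openai import ChatOpenAI

with open("openapi.json") as f:  # any JSON document works
    data = json.load(f)

toolkit = JsonToolkit(spec=JsonSpec(dict_=data, max_value_length=4000))
agent = create_json_agent(llm=ChatOpenAI(temperature=0), toolkit=toolkit, verbose=True)
agent.run("What keys exist at the top level of this document?")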
@@ -13,7 +13,11 @@ from langchain_community.tools.json.tool import (


 class JsonToolkit(BaseToolkit):
-    """Toolkit for interacting with a JSON spec."""
+    """Toolkit for interacting with a JSON spec.
+
+    Parameters:
+        spec: The JSON spec.
+    """

     spec: JsonSpec
@@ -14,6 +14,7 @@ whether permissions of the given toolkit are appropriate for the application.

 See [Security](https://python.langchain.com/docs/security) for more information.
 """

+import warnings
 from typing import Any, Dict, List, Optional, Callable, Tuple
@@ -1,4 +1,5 @@
 """MultiOn agent."""
+
 from __future__ import annotations

 from typing import List
@@ -14,7 +14,11 @@ from langchain_community.utilities.nasa import NasaAPIWrapper


 class NasaToolkit(BaseToolkit):
-    """Nasa Toolkit."""
+    """Nasa Toolkit.
+
+    Parameters:
+        tools: List[BaseTool]. The tools in the toolkit. Default is an empty list.
+    """

     tools: List[BaseTool] = []
@@ -20,7 +20,15 @@ class NLATool(Tool):
     def from_open_api_endpoint_chain(
         cls, chain: OpenAPIEndpointChain, api_title: str
     ) -> "NLATool":
-        """Convert an endpoint chain to an API endpoint tool."""
+        """Convert an endpoint chain to an API endpoint tool.
+
+        Args:
+            chain: The endpoint chain.
+            api_title: The title of the API.
+
+        Returns:
+            The API endpoint tool.
+        """
         expanded_name = (
             f'{api_title.replace(" ", "_")}.{chain.api_operation.operation_id}'
         )
@@ -43,7 +51,22 @@ class NLATool(Tool):
         return_intermediate_steps: bool = False,
         **kwargs: Any,
     ) -> "NLATool":
-        """Instantiate the tool from the specified path and method."""
+        """Instantiate the tool from the specified path and method.
+
+        Args:
+            llm: The language model to use.
+            path: The path of the API.
+            method: The method of the API.
+            spec: The OpenAPI spec.
+            requests: Optional requests object. Default is None.
+            verbose: Whether to print verbose output. Default is False.
+            return_intermediate_steps: Whether to return intermediate steps.
+                Default is False.
+            **kwargs: Additional arguments.
+
+        Returns:
+            The tool.
+        """
         api_operation = APIOperation.from_openapi_spec(spec, path, method)
         chain = OpenAPIEndpointChain.from_api_operation(
             api_operation,
@@ -69,7 +69,18 @@ class NLAToolkit(BaseToolkit):
         verbose: bool = False,
         **kwargs: Any,
     ) -> NLAToolkit:
-        """Instantiate the toolkit by creating tools for each operation."""
+        """Instantiate the toolkit by creating tools for each operation.
+
+        Args:
+            llm: The language model to use.
+            spec: The OpenAPI spec.
+            requests: Optional requests object. Default is None.
+            verbose: Whether to print verbose output. Default is False.
+            **kwargs: Additional arguments.
+
+        Returns:
+            The toolkit.
+        """
         http_operation_tools = cls._get_http_operation_tools(
             llm=llm, spec=spec, requests=requests, verbose=verbose, **kwargs
         )
@@ -84,7 +95,19 @@ class NLAToolkit(BaseToolkit):
         verbose: bool = False,
         **kwargs: Any,
     ) -> NLAToolkit:
-        """Instantiate the toolkit from an OpenAPI Spec URL"""
+        """Instantiate the toolkit from an OpenAPI Spec URL.
+
+        Args:
+            llm: The language model to use.
+            open_api_url: The URL of the OpenAPI spec.
+            requests: Optional requests object. Default is None.
+            verbose: Whether to print verbose output. Default is False.
+            **kwargs: Additional arguments.
+
+        Returns:
+            The toolkit.
+        """

         spec = OpenAPISpec.from_url(open_api_url)
         return cls.from_llm_and_spec(
             llm=llm, spec=spec, requests=requests, verbose=verbose, **kwargs
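A sketch of `from_llm_and_url` in use; the speak.com spec URL is the one the LangChain docs use for this toolkit and may have moved:

from langchain_community.agent_toolkits import NLAToolkit
from langchain_openai import OpenAI

llm = OpenAI(temperature=0)
speak_toolkit = NLAToolkit.from_llm_and_url(llm, "https://api.speak.com/openapi.yaml")
tools = speak_toolkit.get_tools()  # one NLATool per operation in the spec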
@@ -33,6 +33,9 @@ class O365Toolkit(BaseToolkit):
     are appropriate for your use case.

     See https://python.langchain.com/docs/security for more information.
+
+    Parameters:
+        account: Optional. The Office 365 account. Default is None.
     """

     account: Account = Field(default_factory=authenticate)
@@ -1,4 +1,5 @@
 """OpenAPI spec agent."""
+
 from __future__ import annotations

 from typing import TYPE_CHECKING, Any, Dict, List, Optional
@@ -45,6 +46,28 @@ def create_openapi_agent(
     what network access it has.

     See https://python.langchain.com/docs/security for more information.
+
+    Args:
+        llm: The language model to use.
+        toolkit: The OpenAPI toolkit.
+        callback_manager: Optional. The callback manager. Default is None.
+        prefix: Optional. The prefix for the prompt. Default is OPENAPI_PREFIX.
+        suffix: Optional. The suffix for the prompt. Default is OPENAPI_SUFFIX.
+        format_instructions: Optional. The format instructions for the prompt.
+            Default is None.
+        input_variables: Optional. The input variables for the prompt. Default is None.
+        max_iterations: Optional. The maximum number of iterations. Default is 15.
+        max_execution_time: Optional. The maximum execution time. Default is None.
+        early_stopping_method: Optional. The early stopping method. Default is "force".
+        verbose: Optional. Whether to print verbose output. Default is False.
+        return_intermediate_steps: Optional. Whether to return intermediate steps.
+            Default is False.
+        agent_executor_kwargs: Optional. Additional keyword arguments
+            for the agent executor.
+        **kwargs: Additional arguments.
+
+    Returns:
+        The agent executor.
     """
     from langchain.agents.agent import AgentExecutor
     from langchain.agents.mrkl.base import ZeroShotAgent
@@ -3,7 +3,7 @@
 import json
 import re
 from functools import partial
-from typing import Any, Callable, Dict, List, Optional, cast
+from typing import Any, Callable, Dict, List, Literal, Optional, Sequence, cast

 import yaml
 from langchain_core.callbacks import BaseCallbackManager
@@ -45,6 +45,8 @@ from langchain_community.utilities.requests import RequestsWrapper
 MAX_RESPONSE_LENGTH = 5000
 """Maximum length of the response to be returned."""

+Operation = Literal["GET", "POST", "PUT", "DELETE", "PATCH"]
+

 def _get_default_llm_chain(prompt: BasePromptTemplate) -> Any:
     from langchain.chains.llm import LLMChain
@@ -254,25 +256,56 @@ def _create_api_controller_agent(
     requests_wrapper: RequestsWrapper,
     llm: BaseLanguageModel,
     allow_dangerous_requests: bool,
+    allowed_operations: Sequence[Operation],
 ) -> Any:
     from langchain.agents.agent import AgentExecutor
     from langchain.agents.mrkl.base import ZeroShotAgent
     from langchain.chains.llm import LLMChain

-    get_llm_chain = LLMChain(llm=llm, prompt=PARSING_GET_PROMPT)
-    post_llm_chain = LLMChain(llm=llm, prompt=PARSING_POST_PROMPT)
-    tools: List[BaseTool] = [
-        RequestsGetToolWithParsing(  # type: ignore[call-arg]
-            requests_wrapper=requests_wrapper,
-            llm_chain=get_llm_chain,
-            allow_dangerous_requests=allow_dangerous_requests,
-        ),
-        RequestsPostToolWithParsing(  # type: ignore[call-arg]
-            requests_wrapper=requests_wrapper,
-            llm_chain=post_llm_chain,
-            allow_dangerous_requests=allow_dangerous_requests,
-        ),
-    ]
+    tools: List[BaseTool] = []
+    if "GET" in allowed_operations:
+        get_llm_chain = LLMChain(llm=llm, prompt=PARSING_GET_PROMPT)
+        tools.append(
+            RequestsGetToolWithParsing(  # type: ignore[call-arg]
+                requests_wrapper=requests_wrapper,
+                llm_chain=get_llm_chain,
+                allow_dangerous_requests=allow_dangerous_requests,
+            )
+        )
+    if "POST" in allowed_operations:
+        post_llm_chain = LLMChain(llm=llm, prompt=PARSING_POST_PROMPT)
+        tools.append(
+            RequestsPostToolWithParsing(  # type: ignore[call-arg]
+                requests_wrapper=requests_wrapper,
+                llm_chain=post_llm_chain,
+                allow_dangerous_requests=allow_dangerous_requests,
+            )
+        )
+    if "PUT" in allowed_operations:
+        put_llm_chain = LLMChain(llm=llm, prompt=PARSING_PUT_PROMPT)
+        tools.append(
+            RequestsPutToolWithParsing(  # type: ignore[call-arg]
+                requests_wrapper=requests_wrapper,
+                llm_chain=put_llm_chain,
+                allow_dangerous_requests=allow_dangerous_requests,
+            )
+        )
+    if "DELETE" in allowed_operations:
+        delete_llm_chain = LLMChain(llm=llm, prompt=PARSING_DELETE_PROMPT)
+        tools.append(
+            RequestsDeleteToolWithParsing(  # type: ignore[call-arg]
+                requests_wrapper=requests_wrapper,
+                llm_chain=delete_llm_chain,
+                allow_dangerous_requests=allow_dangerous_requests,
+            )
+        )
+    if "PATCH" in allowed_operations:
+        patch_llm_chain = LLMChain(llm=llm, prompt=PARSING_PATCH_PROMPT)
+        tools.append(
+            RequestsPatchToolWithParsing(  # type: ignore[call-arg]
+                requests_wrapper=requests_wrapper,
+                llm_chain=patch_llm_chain,
+                allow_dangerous_requests=allow_dangerous_requests,
+            )
+        )
+    if not tools:
+        raise ValueError("Tools not found")
     prompt = PromptTemplate(
         template=API_CONTROLLER_PROMPT,
         input_variables=["input", "agent_scratchpad"],
@@ -297,6 +330,7 @@ def _create_api_controller_tool(
     requests_wrapper: RequestsWrapper,
     llm: BaseLanguageModel,
     allow_dangerous_requests: bool,
+    allowed_operations: Sequence[Operation],
 ) -> Tool:
     """Expose controller as a tool.

@@ -308,7 +342,7 @@ def _create_api_controller_tool(
     base_url = api_spec.servers[0]["url"]  # TODO: do better.

     def _create_and_run_api_controller_agent(plan_str: str) -> str:
-        pattern = r"\b(GET|POST|PATCH|DELETE)\s+(/\S+)*"
+        pattern = r"\b(GET|POST|PATCH|DELETE|PUT)\s+(/\S+)*"
         matches = re.findall(pattern, plan_str)
         endpoint_names = [
             "{method} {route}".format(method=method, route=route.split("?")[0])
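The widened pattern now recognizes PUT steps in a plan. A quick, self-contained check of what `re.findall` yields (the query string is stripped later by `route.split("?")[0]`):

import re

pattern = r"\b(GET|POST|PATCH|DELETE|PUT)\s+(/\S+)*"
plan = "1. PUT /users/42 to update the user\n2. GET /users/42?fields=name"
print(re.findall(pattern, plan))
# [('PUT', '/users/42'), ('GET', '/users/42?fields=name')]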
@@ -326,7 +360,12 @@ def _create_api_controller_tool(
             raise ValueError(f"{endpoint_name} endpoint does not exist.")

         agent = _create_api_controller_agent(
-            base_url, docs_str, requests_wrapper, llm, allow_dangerous_requests
+            base_url,
+            docs_str,
+            requests_wrapper,
+            llm,
+            allow_dangerous_requests,
+            allowed_operations,
         )
         return agent.run(plan_str)

@@ -346,6 +385,7 @@ def create_openapi_agent(
     verbose: bool = True,
     agent_executor_kwargs: Optional[Dict[str, Any]] = None,
     allow_dangerous_requests: bool = False,
+    allowed_operations: Sequence[Operation] = ("GET", "POST"),
     **kwargs: Any,
 ) -> Any:
     """Construct an OpenAPI planner and controller for a given spec.
@@ -363,6 +403,24 @@ def create_openapi_agent(
     and avoid accepting inputs from untrusted sources without proper sandboxing.
     Please see: https://python.langchain.com/docs/security
     for further security information.
+
+    Args:
+        api_spec: The OpenAPI spec.
+        requests_wrapper: The requests wrapper.
+        llm: The language model.
+        shared_memory: Optional. The shared memory. Default is None.
+        callback_manager: Optional. The callback manager. Default is None.
+        verbose: Optional. Whether to print verbose output. Default is True.
+        agent_executor_kwargs: Optional. Additional keyword arguments
+            for the agent executor.
+        allow_dangerous_requests: Optional. Whether to allow dangerous requests.
+            Default is False.
+        allowed_operations: Optional. The allowed operations.
+            Default is ("GET", "POST").
+        **kwargs: Additional arguments.
+
+    Returns:
+        The agent executor.
     """
     from langchain.agents.agent import AgentExecutor
     from langchain.agents.mrkl.base import ZeroShotAgent
@@ -371,7 +429,11 @@ def create_openapi_agent(
     tools = [
         _create_api_planner_tool(api_spec, llm),
         _create_api_controller_tool(
-            api_spec, requests_wrapper, llm, allow_dangerous_requests
+            api_spec,
+            requests_wrapper,
+            llm,
+            allow_dangerous_requests,
+            allowed_operations,
         ),
     ]
     prompt = PromptTemplate(
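A sketch of the new `allowed_operations` parameter in use; spec loading follows the pattern this module's docs use, and the file name and model are placeholders:

import yaml

from langchain_community.agent_toolkits.openapi import planner
from langchain_community.agent_toolkits.openapi.spec import reduce_openapi_spec
from langchain_community.utilities.requests import RequestsWrapper
from langchain_openai import ChatOpenAI

with open("openapi.yaml") as f:
    raw_spec = yaml.safe_load(f)
api_spec = reduce_openapi_spec(raw_spec)

agent = planner.create_openapi_agent(
    api_spec,
    RequestsWrapper(headers={}),
    ChatOpenAI(temperature=0),
    allow_dangerous_requests=True,              # explicit opt-in, per the security note
    allowed_operations=("GET", "POST", "PUT"),  # PUT is now recognized
)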
@@ -12,7 +12,7 @@ class ReducedOpenAPISpec:

     This is a quick and dirty representation for OpenAPI specs.

-    Attributes:
+    Parameters:
         servers: The servers in the spec.
         description: The description of the spec.
         endpoints: The endpoints in the spec.
@@ -30,6 +30,13 @@ def reduce_openapi_spec(spec: dict, dereference: bool = True) -> ReducedOpenAPISpec:
     I want smaller results from retrieval.
     I was hoping https://openapi.tools/ would have some useful bits
     to this end, but doesn't seem so.
+
+    Args:
+        spec: The OpenAPI spec.
+        dereference: Whether to dereference the spec. Default is True.
+
+    Returns:
+        ReducedOpenAPISpec: The reduced OpenAPI spec.
     """
     # 1. Consider only get, post, patch, put, delete endpoints.
     endpoints = [
@@ -1,4 +1,5 @@
 """Requests toolkit."""
+
 from __future__ import annotations

 from typing import Any, List
@@ -40,6 +41,7 @@ class RequestsToolkit(BaseToolkit):
     """

     requests_wrapper: TextRequestsWrapper
+    """The requests wrapper."""
     allow_dangerous_requests: bool = False
     """Allow dangerous requests. See documentation for details."""

@@ -81,7 +83,9 @@ class OpenAPIToolkit(BaseToolkit):
     """

     json_agent: Any
+    """The JSON agent."""
     requests_wrapper: TextRequestsWrapper
+    """The requests wrapper."""
     allow_dangerous_requests: bool = False
     """Allow dangerous requests. See documentation for details."""
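Usage sketch for the fields documented above; `allow_dangerous_requests` must be opted into explicitly:

from langchain_community.agent_toolkits.openapi.toolkit import RequestsToolkit
from langchain_community.utilities.requests import TextRequestsWrapper

toolkit = RequestsToolkit(
    requests_wrapper=TextRequestsWrapper(headers={}),
    allow_dangerous_requests=True,  # defaults to False
)
tools = toolkit.get_tools()  # GET/POST/PATCH/PUT/DELETE request tools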
@@ -1,4 +1,5 @@
 """Playwright browser toolkit."""
+
 from langchain_community.agent_toolkits.playwright.toolkit import (
     PlayWrightBrowserToolkit,
 )
@@ -1,4 +1,5 @@
 """Playwright web browser toolkit."""
+
 from __future__ import annotations

 from typing import TYPE_CHECKING, List, Optional, Type, cast
@@ -58,6 +59,10 @@ class PlayWrightBrowserToolkit(BaseToolkit):
     tools.

     See https://python.langchain.com/docs/security for more information.
+
+    Parameters:
+        sync_browser: Optional. The sync browser. Default is None.
+        async_browser: Optional. The async browser. Default is None.
     """

     sync_browser: Optional["SyncBrowser"] = None
@@ -103,7 +108,15 @@ class PlayWrightBrowserToolkit(BaseToolkit):
         sync_browser: Optional[SyncBrowser] = None,
         async_browser: Optional[AsyncBrowser] = None,
     ) -> PlayWrightBrowserToolkit:
-        """Instantiate the toolkit."""
+        """Instantiate the toolkit.
+
+        Args:
+            sync_browser: Optional. The sync browser. Default is None.
+            async_browser: Optional. The async browser. Default is None.
+
+        Returns:
+            The toolkit.
+        """
         # This is to raise a better error than the forward ref ones Pydantic would have
         lazy_import_playwright_browsers()
         return cls(sync_browser=sync_browser, async_browser=async_browser)
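Sketch of `from_browser` with a sync browser (requires the `playwright` package plus a one-time `playwright install`):

from langchain_community.agent_toolkits import PlayWrightBrowserToolkit
from langchain_community.tools.playwright.utils import create_sync_playwright_browser

sync_browser = create_sync_playwright_browser()
toolkit = PlayWrightBrowserToolkit.from_browser(sync_browser=sync_browser)
tools = toolkit.get_tools()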
@@ -13,7 +13,11 @@ from langchain_community.utilities.polygon import PolygonAPIWrapper


 class PolygonToolkit(BaseToolkit):
-    """Polygon Toolkit."""
+    """Polygon Toolkit.
+
+    Parameters:
+        tools: List[BaseTool]. The tools in the toolkit.
+    """

     tools: List[BaseTool] = []

@@ -21,6 +25,14 @@ class PolygonToolkit(BaseToolkit):
     def from_polygon_api_wrapper(
         cls, polygon_api_wrapper: PolygonAPIWrapper
     ) -> "PolygonToolkit":
+        """Create a Polygon Toolkit from a Polygon API Wrapper.
+
+        Args:
+            polygon_api_wrapper: PolygonAPIWrapper. The Polygon API Wrapper.
+
+        Returns:
+            PolygonToolkit. The Polygon Toolkit.
+        """
         tools = [
             PolygonAggregates(
                 api_wrapper=polygon_api_wrapper,
@@ -1,4 +1,5 @@
 """Power BI agent."""
+
 from __future__ import annotations

 from typing import TYPE_CHECKING, Any, Dict, List, Optional
@@ -32,7 +33,27 @@ def create_pbi_agent(
     agent_executor_kwargs: Optional[Dict[str, Any]] = None,
     **kwargs: Any,
 ) -> AgentExecutor:
-    """Construct a Power BI agent from an LLM and tools."""
+    """Construct a Power BI agent from an LLM and tools.
+
+    Args:
+        llm: The language model to use.
+        toolkit: Optional. The Power BI toolkit. Default is None.
+        powerbi: Optional. The Power BI dataset. Default is None.
+        callback_manager: Optional. The callback manager. Default is None.
+        prefix: Optional. The prefix for the prompt. Default is POWERBI_PREFIX.
+        suffix: Optional. The suffix for the prompt. Default is POWERBI_SUFFIX.
+        format_instructions: Optional. The format instructions for the prompt.
+            Default is None.
+        examples: Optional. The examples for the prompt. Default is None.
+        input_variables: Optional. The input variables for the prompt. Default is None.
+        top_k: Optional. The top k for the prompt. Default is 10.
+        verbose: Optional. Whether to print verbose output. Default is False.
+        agent_executor_kwargs: Optional. The agent executor kwargs. Default is None.
+        kwargs: Any. Additional keyword arguments.
+
+    Returns:
+        The agent executor.
+    """
     from langchain.agents import AgentExecutor
     from langchain.agents.mrkl.base import ZeroShotAgent
     from langchain.chains.llm import LLMChain
@@ -1,4 +1,5 @@
 """Power BI agent."""
+
 from __future__ import annotations

 from typing import TYPE_CHECKING, Any, Dict, List, Optional
@@ -38,6 +39,25 @@ def create_pbi_chat_agent(
     """Construct a Power BI agent from a Chat LLM and tools.

     If you supply only a toolkit and no Power BI dataset, the same LLM is used for both.
+
+    Args:
+        llm: The language model to use.
+        toolkit: Optional. The Power BI toolkit. Default is None.
+        powerbi: Optional. The Power BI dataset. Default is None.
+        callback_manager: Optional. The callback manager. Default is None.
+        output_parser: Optional. The output parser. Default is None.
+        prefix: Optional. The prefix for the prompt. Default is POWERBI_CHAT_PREFIX.
+        suffix: Optional. The suffix for the prompt. Default is POWERBI_CHAT_SUFFIX.
+        examples: Optional. The examples for the prompt. Default is None.
+        input_variables: Optional. The input variables for the prompt. Default is None.
+        memory: Optional. The memory. Default is None.
+        top_k: Optional. The top k for the prompt. Default is 10.
+        verbose: Optional. Whether to print verbose output. Default is False.
+        agent_executor_kwargs: Optional. The agent executor kwargs. Default is None.
+        kwargs: Any. Additional keyword arguments.
+
+    Returns:
+        The agent executor.
     """
     from langchain.agents import AgentExecutor
     from langchain.agents.conversational_chat.base import ConversationalChatAgent
@@ -1,7 +1,6 @@
 # flake8: noqa
 """Prompts for PowerBI agent."""

-
 POWERBI_PREFIX = """You are an agent designed to help users interact with a PowerBI Dataset.

 Agent has access to a tool that can write a query based on the question and then run those against PowerBI, Microsoft's business intelligence tool. The questions from the users should be interpreted as related to the dataset that is available and not general questions about the world. If the question does not seem related to the dataset, return "This does not appear to be part of this dataset." as the answer.
@@ -1,4 +1,5 @@
 """Toolkit for interacting with a Power BI dataset."""
+
 from __future__ import annotations

 from typing import TYPE_CHECKING, List, Optional, Union
@@ -43,6 +44,15 @@ class PowerBIToolkit(BaseToolkit):
     code are appropriately scoped to the application.

     See https://python.langchain.com/docs/security for more information.
+
+    Parameters:
+        powerbi: The Power BI dataset.
+        llm: The language model to use.
+        examples: Optional. The examples for the prompt. Default is None.
+        max_iterations: Optional. The maximum iterations to run. Default is 5.
+        callback_manager: Optional. The callback manager. Default is None.
+        output_token_limit: Optional. The output token limit. Default is None.
+        tiktoken_model_name: Optional. The TikToken model name. Default is None.
     """

     powerbi: PowerBIDataset = Field(exclude=True)
@@ -17,7 +17,11 @@ if TYPE_CHECKING:


 class SlackToolkit(BaseToolkit):
-    """Toolkit for interacting with Slack."""
+    """Toolkit for interacting with Slack.
+
+    Parameters:
+        client: The Slack client.
+    """

     client: WebClient = Field(default_factory=login)
@@ -1,4 +1,5 @@
 """Spark SQL agent."""
+
 from __future__ import annotations

 from typing import TYPE_CHECKING, Any, Dict, List, Optional
@@ -30,7 +31,29 @@ def create_spark_sql_agent(
     agent_executor_kwargs: Optional[Dict[str, Any]] = None,
     **kwargs: Any,
 ) -> AgentExecutor:
-    """Construct a Spark SQL agent from an LLM and tools."""
+    """Construct a Spark SQL agent from an LLM and tools.
+
+    Args:
+        llm: The language model to use.
+        toolkit: The Spark SQL toolkit.
+        callback_manager: Optional. The callback manager. Default is None.
+        callbacks: Optional. The callbacks. Default is None.
+        prefix: Optional. The prefix for the prompt. Default is SQL_PREFIX.
+        suffix: Optional. The suffix for the prompt. Default is SQL_SUFFIX.
+        format_instructions: Optional. The format instructions for the prompt.
+            Default is None.
+        input_variables: Optional. The input variables for the prompt. Default is None.
+        top_k: Optional. The top k for the prompt. Default is 10.
+        max_iterations: Optional. The maximum iterations to run. Default is 15.
+        max_execution_time: Optional. The maximum execution time. Default is None.
+        early_stopping_method: Optional. The early stopping method. Default is "force".
+        verbose: Optional. Whether to print verbose output. Default is False.
+        agent_executor_kwargs: Optional. The agent executor kwargs. Default is None.
+        kwargs: Any. Additional keyword arguments.
+
+    Returns:
+        The agent executor.
+    """
     from langchain.agents.agent import AgentExecutor
     from langchain.agents.mrkl.base import ZeroShotAgent
     from langchain.chains.llm import LLMChain
@@ -1,4 +1,5 @@
 """Toolkit for interacting with Spark SQL."""
+
 from typing import List

 from langchain_core.language_models import BaseLanguageModel
@@ -16,7 +17,12 @@ from langchain_community.utilities.spark_sql import SparkSQL


 class SparkSQLToolkit(BaseToolkit):
-    """Toolkit for interacting with Spark SQL."""
+    """Toolkit for interacting with Spark SQL.
+
+    Parameters:
+        db: SparkSQL. The Spark SQL database.
+        llm: BaseLanguageModel. The language model.
+    """

     db: SparkSQL = Field(exclude=True)
     llm: BaseLanguageModel = Field(exclude=True)
@@ -1,4 +1,5 @@
 """SQL agent."""
+
 from __future__ import annotations

 from typing import (
@@ -1,4 +1,5 @@
 """Toolkit for interacting with an SQL database."""
+
 from typing import List

 from langchain_core.language_models import BaseLanguageModel
@@ -16,7 +17,12 @@ from langchain_community.utilities.sql_database import SQLDatabase


 class SQLDatabaseToolkit(BaseToolkit):
-    """Toolkit for interacting with SQL databases."""
+    """Toolkit for interacting with SQL databases.
+
+    Parameters:
+        db: SQLDatabase. The SQL database.
+        llm: BaseLanguageModel. The language model.
+    """

     db: SQLDatabase = Field(exclude=True)
     llm: BaseLanguageModel = Field(exclude=True)
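Usage sketch for the two documented fields; Chinook.db is the sample SQLite database used throughout the LangChain docs:

from langchain_community.agent_toolkits import SQLDatabaseToolkit
from langchain_community.utilities.sql_database import SQLDatabase
from langchain_openai import ChatOpenAI

db = SQLDatabase.from_uri("sqlite:///Chinook.db")
toolkit = SQLDatabaseToolkit(db=db, llm=ChatOpenAI(temperature=0))
tools = toolkit.get_tools()  # query, schema-info, list-tables and checker tools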
@@ -1,4 +1,5 @@
 """Steam Toolkit."""
+
 from typing import List

 from langchain_core.tools import BaseToolkit
@@ -13,7 +14,11 @@ from langchain_community.utilities.steam import SteamWebAPIWrapper


 class SteamToolkit(BaseToolkit):
-    """Steam Toolkit."""
+    """Steam Toolkit.
+
+    Parameters:
+        tools: List[BaseTool]. The tools in the toolkit. Default is an empty list.
+    """

     tools: List[BaseTool] = []

@@ -21,6 +26,14 @@ class SteamToolkit(BaseToolkit):
     def from_steam_api_wrapper(
         cls, steam_api_wrapper: SteamWebAPIWrapper
     ) -> "SteamToolkit":
+        """Create a Steam Toolkit from a Steam API Wrapper.
+
+        Args:
+            steam_api_wrapper: SteamWebAPIWrapper. The Steam API Wrapper.
+
+        Returns:
+            SteamToolkit. The Steam Toolkit.
+        """
         operations: List[dict] = [
             {
                 "mode": "get_games_details",
@@ -1,4 +1,5 @@
 """[DEPRECATED] Zapier Toolkit."""
+
 from typing import List

 from langchain_core._api import warn_deprecated
@@ -10,7 +11,11 @@ from langchain_community.utilities.zapier import ZapierNLAWrapper


 class ZapierToolkit(BaseToolkit):
-    """Zapier Toolkit."""
+    """Zapier Toolkit.
+
+    Parameters:
+        tools: List[BaseTool]. The tools in the toolkit. Default is an empty list.
+    """

     tools: List[BaseTool] = []

@@ -18,7 +23,14 @@ class ZapierToolkit(BaseToolkit):
     def from_zapier_nla_wrapper(
         cls, zapier_nla_wrapper: ZapierNLAWrapper
     ) -> "ZapierToolkit":
-        """Create a toolkit from a ZapierNLAWrapper."""
+        """Create a toolkit from a ZapierNLAWrapper.
+
+        Args:
+            zapier_nla_wrapper: ZapierNLAWrapper. The Zapier NLA wrapper.
+
+        Returns:
+            ZapierToolkit. The Zapier toolkit.
+        """
         actions = zapier_nla_wrapper.list()
         tools = [
             ZapierNLARunAction(
@@ -35,7 +47,14 @@ class ZapierToolkit(BaseToolkit):
     async def async_from_zapier_nla_wrapper(
         cls, zapier_nla_wrapper: ZapierNLAWrapper
     ) -> "ZapierToolkit":
-        """Create a toolkit from a ZapierNLAWrapper."""
+        """Async create a toolkit from a ZapierNLAWrapper.
+
+        Args:
+            zapier_nla_wrapper: ZapierNLAWrapper. The Zapier NLA wrapper.
+
+        Returns:
+            ZapierToolkit. The Zapier toolkit.
+        """
         actions = await zapier_nla_wrapper.alist()
         tools = [
             ZapierNLARunAction(
Some files were not shown because too many files have changed in this diff.