mirror of https://github.com/hwchase17/langchain.git
synced 2025-07-10 15:06:18 +00:00

ruff: more rules across the board & fixes (#31898)

* standardizes ruff dep version across all `pyproject.toml` files
* cli: ruff rules and corrections
* langchain: rules and corrections

This commit is contained in: parent 706a66eccd, commit e7eac27241

Makefile
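Most of the `# noqa: FBT…` suppressions in the hunks below come from flake8-boolean-trap, one of the newly enabled rule families. A minimal sketch (illustrative only, not taken from the commit) of what the rule flags and why the CLI suppresses it rather than refactoring:

```python
# Illustrative sketch, not from the commit: the boolean-trap (FBT) rules.
def set_visibility(visible: bool) -> None:  # FBT001: boolean positional argument
    print("visible" if visible else "hidden")


set_visibility(True)  # FBT003: a bare boolean literal is opaque at the call site


# The usual fix is to force a keyword so call sites read naturally:
def set_visibility_kw(*, visible: bool) -> None:
    print("visible" if visible else "hidden")


set_visibility_kw(visible=True)
# Typer/Click callbacks must accept the flag value positionally, which is why
# the CLI code below adds `# noqa: FBT001` / `# noqa: FBT003` instead.
```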
@@ -71,7 +71,6 @@ spell_fix:

lint lint_package lint_tests:
	uv run --group lint ruff check docs cookbook
	uv run --group lint ruff format docs cookbook --diff
	uv run --group lint ruff check docs cookbook
	git --no-pager grep 'from langchain import' docs cookbook | grep -vE 'from langchain import (hub)' && echo "Error: no importing langchain from root in docs, except for hub" && exit 1 || exit 0

	git --no-pager grep 'api.python.langchain.com' -- docs/docs ':!docs/docs/additional_resources/arxiv_references.mdx' ':!docs/docs/integrations/document_loaders/sitemap.ipynb' || exit 0 && \
@@ -11,7 +11,9 @@ from langchain_cli.utils.packages import get_langserve_export, get_package_root

app = typer.Typer(no_args_is_help=True, add_completion=False)
app.add_typer(
    template_namespace.package_cli, name="template", help=template_namespace.__doc__
    template_namespace.package_cli,
    name="template",
    help=template_namespace.__doc__,
)
app.add_typer(app_namespace.app_cli, name="app", help=app_namespace.__doc__)
app.add_typer(

@@ -32,7 +34,7 @@ app.command(
)


def version_callback(show_version: bool) -> None:
def version_callback(show_version: bool) -> None:  # noqa: FBT001
    if show_version:
        typer.echo(f"langchain-cli {__version__}")
        raise typer.Exit

@@ -40,8 +42,8 @@ def version_callback(show_version: bool) -> None:

@app.callback()
def main(
    version: bool = typer.Option(
        False,
    version: bool = typer.Option(  # noqa: FBT001
        False,  # noqa: FBT003
        "--version",
        "-v",
        help="Print the current CLI version.",

@@ -56,10 +58,12 @@ def main(
def serve(
    *,
    port: Annotated[
        Optional[int], typer.Option(help="The port to run the server on")
        Optional[int],
        typer.Option(help="The port to run the server on"),
    ] = None,
    host: Annotated[
        Optional[str], typer.Option(help="The host to run the server on")
        Optional[str],
        typer.Option(help="The host to run the server on"),
    ] = None,
) -> None:
    """Start the LangServe app, whether it's a template or an app."""
@@ -1,7 +1,7 @@
# type: ignore
"""Development Scripts for template packages."""

from collections.abc import Sequence
from typing import Literal

from fastapi import FastAPI
from langserve import add_routes

@@ -12,9 +12,9 @@ from langchain_cli.utils.packages import get_langserve_export, get_package_root
def create_demo_server(
    *,
    config_keys: Sequence[str] = (),
    playground_type: str = "default",
    playground_type: Literal["default", "chat"] = "default",
):
    """Creates a demo server for the current template."""
    """Create a demo server for the current template."""
    app = FastAPI()
    package_root = get_package_root()
    pyproject = package_root / "pyproject.toml"
@@ -39,7 +39,7 @@ lint lint_diff lint_package lint_tests:

format format_diff:
	[ "$(PYTHON_FILES)" = "" ] || poetry run ruff format $(PYTHON_FILES)
	[ "$(PYTHON_FILES)" = "" ] || poetry run ruff check --select I --fix $(PYTHON_FILES)
	[ "$(PYTHON_FILES)" = "" ] || poetry run ruff check --fix $(PYTHON_FILES)

spell_check:
	poetry run codespell --toml pyproject.toml
@@ -79,7 +79,9 @@ def new(
    package_prompt = "What package would you like to add? (leave blank to skip)"
    while True:
        package_str = typer.prompt(
            package_prompt, default="", show_default=False
            package_prompt,
            default="",
            show_default=False,
        )
        if not package_str:
            break

@@ -121,26 +123,29 @@ def new(
    typer.echo("Then add templates with commands like:\n")
    typer.echo(" langchain app add extraction-openai-functions")
    typer.echo(
        " langchain app add git+ssh://git@github.com/efriis/simple-pirate.git\n\n"
        " langchain app add git+ssh://git@github.com/efriis/simple-pirate.git\n\n",
    )


@app_cli.command()
def add(
    dependencies: Annotated[
        Optional[list[str]], typer.Argument(help="The dependency to add")
        Optional[list[str]],
        typer.Argument(help="The dependency to add"),
    ] = None,
    *,
    api_path: Annotated[list[str], typer.Option(help="API paths to add")] = [],
    project_dir: Annotated[
        Optional[Path], typer.Option(help="The project directory")
        Optional[Path],
        typer.Option(help="The project directory"),
    ] = None,
    repo: Annotated[
        list[str],
        typer.Option(help="Install templates from a specific github repo instead"),
    ] = [],
    branch: Annotated[
        list[str], typer.Option(help="Install templates from a specific branch")
        list[str],
        typer.Option(help="Install templates from a specific branch"),
    ] = [],
    pip: Annotated[
        bool,

@@ -152,13 +157,12 @@ def add(
        ),
    ],
) -> None:
    """Adds the specified template to the current LangServe app.
    """Add the specified template to the current LangServe app.

    e.g.:
    langchain app add extraction-openai-functions
    langchain app add git+ssh://git@github.com/efriis/simple-pirate.git
    """

    if not branch and not repo:
        warnings.warn(
            "Adding templates from the default branch and repo is deprecated."

@@ -173,7 +177,7 @@ def add(
    package_dir = project_root / "packages"

    create_events(
        [{"event": "serve add", "properties": {"parsed_dep": d}} for d in parsed_deps]
        [{"event": "serve add", "properties": {"parsed_dep": d}} for d in parsed_deps],
    )

    # group by repo/ref

@@ -248,7 +252,7 @@ def add(
    cmd = ["pip", "install", "-e", *installed_destination_strs]
    cmd_str = " \\\n ".join(installed_destination_strs)
    typer.echo(f"Running: pip install -e \\\n {cmd_str}")
    subprocess.run(cmd, cwd=cwd)
    subprocess.run(cmd, cwd=cwd)  # noqa: S603

    chain_names = []
    for e in installed_exports:
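The `# noqa: S603` above (and `# noqa: S607` further down) comes from flake8-bandit. A hedged sketch, not from the commit, of what those two checks flag:

```python
# Sketch of the flake8-bandit subprocess checks suppressed in this commit.
import subprocess

# S603 flags any subprocess call whose argument list might carry untrusted
# input; here the list is built from fixed strings, so the CLI suppresses it.
# S607 separately flags a partial executable path ("pip" rather than an
# absolute path), because it resolves through PATH at runtime.
subprocess.run(["pip", "--version"], check=False)  # noqa: S603, S607
```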
@@ -296,10 +300,11 @@ def remove(
    api_paths: Annotated[list[str], typer.Argument(help="The API paths to remove")],
    *,
    project_dir: Annotated[
        Optional[Path], typer.Option(help="The project directory")
        Optional[Path],
        typer.Option(help="The project directory"),
    ] = None,
) -> None:
    """Removes the specified package from the current LangServe app."""
    """Remove the specified package from the current LangServe app."""
    project_root = get_package_root(project_dir)

    project_pyproject = project_root / "pyproject.toml"

@@ -320,7 +325,7 @@ def remove(

            shutil.rmtree(package_dir)
            remove_deps.append(api_path)
        except Exception:
        except Exception:  # noqa: S110
            pass

    try:

@@ -334,16 +339,19 @@ def remove(
def serve(
    *,
    port: Annotated[
        Optional[int], typer.Option(help="The port to run the server on")
        Optional[int],
        typer.Option(help="The port to run the server on"),
    ] = None,
    host: Annotated[
        Optional[str], typer.Option(help="The host to run the server on")
        Optional[str],
        typer.Option(help="The host to run the server on"),
    ] = None,
    app: Annotated[
        Optional[str], typer.Option(help="The app to run, e.g. `app.server:app`")
        Optional[str],
        typer.Option(help="The app to run, e.g. `app.server:app`"),
    ] = None,
) -> None:
    """Starts the LangServe app."""
    """Start the LangServe app."""
    # add current dir as first entry of path
    sys.path.append(str(Path.cwd()))

@@ -353,5 +361,8 @@ def serve(
    import uvicorn

    uvicorn.run(
        app_str, host=host_str, port=port if port is not None else 8000, reload=True
        app_str,
        host=host_str,
        port=port if port is not None else 8000,
        reload=True,
    )
@@ -66,7 +66,7 @@ def new(
        Optional[str],
        typer.Option(
            help="The name of the integration in PascalCase. e.g. `MyIntegration`."
            " This is used to name classes like `MyIntegrationVectorStore`"
            " This is used to name classes like `MyIntegrationVectorStore`",
        ),
    ] = None,
    src: Annotated[

@@ -85,7 +85,7 @@ def new(
        ),
    ] = None,
) -> None:
    """Creates a new integration package."""
    """Create a new integration package."""
    try:
        replacements = _process_name(name)
    except ValueError as e:

@@ -96,13 +96,14 @@ def new(
        if not re.match(r"^[A-Z][a-zA-Z0-9]*$", name_class):
            typer.echo(
                "Name should only contain letters (a-z, A-Z), numbers, and underscores"
                ", and start with a capital letter."
                ", and start with a capital letter.",
            )
            raise typer.Exit(code=1)
        replacements["__ModuleName__"] = name_class
    else:
        replacements["__ModuleName__"] = typer.prompt(
            "Name of integration in PascalCase", default=replacements["__ModuleName__"]
            "Name of integration in PascalCase",
            default=replacements["__ModuleName__"],
        )

    project_template_dir = Path(__file__).parents[1] / "integration_template"

@@ -124,7 +125,7 @@ def new(

        # poetry install
        subprocess.run(
            ["poetry", "install", "--with", "lint,test,typing,test_integration"],
            ["poetry", "install", "--with", "lint,test,typing,test_integration"],  # noqa: S607
            cwd=destination_dir,
        )
    else:

@@ -152,7 +153,7 @@ def new(
    if len(dst_paths) != len(set(dst_paths)):
        typer.echo(
            "Duplicate destination paths provided or computed - please "
            "specify them explicitly with --dst."
            "specify them explicitly with --dst.",
        )
        raise typer.Exit(code=1)

@@ -224,10 +225,10 @@ def create_doc(
        ),
    ] = "docs/docs/integrations/chat/",
) -> None:
    """Creates a new integration doc."""
    """Create a new integration doc."""
    if component_type not in TEMPLATE_MAP:
        typer.echo(
            f"Unrecognized {component_type=}. Expected one of {_component_types_str}."
            f"Unrecognized {component_type=}. Expected one of {_component_types_str}.",
        )
        raise typer.Exit(code=1)
@@ -6,14 +6,17 @@ import pkgutil


def generate_raw_migrations(
    from_package: str, to_package: str, filter_by_all: bool = False
    from_package: str,
    to_package: str,
    filter_by_all: bool = False,  # noqa: FBT001, FBT002
) -> list[tuple[str, str]]:
    """Scan the `langchain` package and generate migrations for all modules."""
    package = importlib.import_module(from_package)

    items = []
    for _importer, modname, _ispkg in pkgutil.walk_packages(
        package.__path__, package.__name__ + "."
        package.__path__,
        package.__name__ + ".",
    ):
        try:
            module = importlib.import_module(modname)

@@ -34,33 +37,35 @@ def generate_raw_migrations(
                obj = getattr(module, name, None)
            except ImportError:
                continue
            if obj and (inspect.isclass(obj) or inspect.isfunction(obj)):
                if obj.__module__.startswith(to_package):
                    items.append(
                        (f"{modname}.{name}", f"{obj.__module__}.{obj.__name__}")
                    )
            if (
                obj
                and (inspect.isclass(obj) or inspect.isfunction(obj))
                and obj.__module__.startswith(to_package)
            ):
                items.append(
                    (f"{modname}.{name}", f"{obj.__module__}.{obj.__name__}"),
                )

        if not filter_by_all:
            # Iterate over all members of the module
            for name, obj in inspect.getmembers(module):
                # Check if it's a class or function
                if inspect.isclass(obj) or inspect.isfunction(obj):
                    # Check if the module name of the obj starts with
                    # 'langchain_community'
                    if obj.__module__.startswith(to_package):
                        items.append(
                            (f"{modname}.{name}", f"{obj.__module__}.{obj.__name__}")
                        )
                # Check if the module name of the obj starts with
                # 'langchain_community'
                if inspect.isclass(obj) or (
                    inspect.isfunction(obj) and obj.__module__.startswith(to_package)
                ):
                    items.append(
                        (f"{modname}.{name}", f"{obj.__module__}.{obj.__name__}"),
                    )

    return items


def generate_top_level_imports(pkg: str) -> list[tuple[str, str]]:
    """This code will look at all the top level modules in langchain_community.
    """Look at all the top level modules in langchain_community.

    It'll attempt to import everything from each __init__ file
    for example,
    Attempt to import everything from each ``__init__`` file. For example,

    langchain_community/
        chat_models/
@@ -74,10 +79,10 @@ def generate_top_level_imports(pkg: str) -> list[tuple[str, str]]:

    Each tuple will contain the fully qualified path of the class / function to where
    its logic is defined
    (e.g., langchain_community.chat_models.xyz_implementation.ver2.XYZ)
    (e.g., ``langchain_community.chat_models.xyz_implementation.ver2.XYZ``)
    and the second tuple will contain the path
    to importing it from the top level namespaces
    (e.g., langchain_community.chat_models.XYZ)
    (e.g., ``langchain_community.chat_models.XYZ``)
    """
    package = importlib.import_module(pkg)

@@ -98,7 +103,7 @@ def generate_top_level_imports(pkg: str) -> list[tuple[str, str]]:
                    top_level_import = f"{module_name}.{name}"
                    # Append the tuple with original and top-level paths
                    items.append(
                        (f"{original_module}.{original_name}", top_level_import)
                        (f"{original_module}.{original_name}", top_level_import),
                    )

    # Handle the package itself (root level)

@@ -106,7 +111,8 @@ def generate_top_level_imports(pkg: str) -> list[tuple[str, str]]:

    # Only iterate through top-level modules/packages
    for _finder, modname, ispkg in pkgutil.iter_modules(
        package.__path__, package.__name__ + "."
        package.__path__,
        package.__name__ + ".",
    ):
        if ispkg:
            try:

@@ -119,11 +125,15 @@ def generate_top_level_imports(pkg: str) -> list[tuple[str, str]]:


def generate_simplified_migrations(
    from_package: str, to_package: str, filter_by_all: bool = True
    from_package: str,
    to_package: str,
    filter_by_all: bool = True,  # noqa: FBT001, FBT002
) -> list[tuple[str, str]]:
    """Get all the raw migrations, then simplify them if possible."""
    raw_migrations = generate_raw_migrations(
        from_package, to_package, filter_by_all=filter_by_all
        from_package,
        to_package,
        filter_by_all=filter_by_all,
    )
    top_level_simplifications = generate_top_level_imports(to_package)
    top_level_dict = dict(top_level_simplifications)
@@ -17,7 +17,7 @@ def dump_migrations_as_grit(name: str, migration_pairs: list[tuple[str, str]]) -
            ]
            """
            for from_module, to_module in migration_pairs
        ]
        ],
    )
    pattern_name = f"langchain_migrate_{name}"
    return f"""

@@ -28,6 +28,7 @@ def get_migrations_for_partner_package(pkg_name: str) -> list[tuple[str, str]]:

    Returns:
        List of 2-tuples containing old and new import paths.

    """
    package = importlib.import_module(pkg_name)
    classes_ = find_subclasses_in_module(
@@ -20,7 +20,7 @@ class ImportExtractor(ast.NodeVisitor):
        self.imports: list = []
        self.package = from_package

    def visit_ImportFrom(self, node) -> None:
    def visit_ImportFrom(self, node) -> None:  # noqa: N802
        if node.module and (
            self.package is None or str(node.module).startswith(self.package)
        ):

@@ -39,7 +39,7 @@ def _get_class_names(code: str) -> list[str]:

    # Define a node visitor class to collect class names
    class ClassVisitor(ast.NodeVisitor):
        def visit_ClassDef(self, node) -> None:
        def visit_ClassDef(self, node) -> None:  # noqa: N802
            class_names.append(node.name)
            self.generic_visit(node)
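The `# noqa: N802` suppressions above exist because pep8-naming wants snake_case method names, while `ast.NodeVisitor` dispatches on camelCase ones. A small self-contained illustration (not from the commit):

```python
# Why visit_ImportFrom must keep its camelCase name despite pep8-naming (N802).
import ast


class ImportCollector(ast.NodeVisitor):
    # ast.NodeVisitor looks up methods named `visit_<NodeClassName>`, so the
    # method name must mirror the AST class name exactly.
    def visit_ImportFrom(self, node: ast.ImportFrom) -> None:  # noqa: N802
        print(node.module)
        self.generic_visit(node)


ImportCollector().visit(ast.parse("from os import path"))  # prints "os"
```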
@@ -79,7 +79,9 @@ def _get_all_classnames_from_file(file: Path, pkg: str) -> list[tuple[str, str]]


def identify_all_imports_in_file(
    file: str, *, from_package: Optional[str] = None
    file: str,
    *,
    from_package: Optional[str] = None,
) -> list[tuple[str, str]]:
    """Let's also identify all the imports in the given file."""
    with open(file, encoding="utf-8") as f:

@@ -96,10 +98,13 @@ def identify_pkg_source(pkg_root: str) -> pathlib.Path:

    Returns:
        Returns the path to the source code for the package.

    """
    dirs = [d for d in Path(pkg_root).iterdir() if d.is_dir()]
    matching_dirs = [d for d in dirs if d.name.startswith("langchain_")]
    assert len(matching_dirs) == 1, "There should be only one langchain package."
    if len(matching_dirs) != 1:
        msg = "There should be only one langchain package."
        raise ValueError(msg)
    return matching_dirs[0]


@@ -134,7 +139,9 @@ def list_init_imports_by_package(pkg_root: str) -> list[tuple[str, str]]:


def find_imports_from_package(
    code: str, *, from_package: Optional[str] = None
    code: str,
    *,
    from_package: Optional[str] = None,
) -> list[tuple[str, str]]:
    # Parse the code into an AST
    tree = ast.parse(code)
@@ -4,7 +4,7 @@ from pathlib import Path

import rich
import typer
from gritql import run  # type: ignore
from gritql import run  # type: ignore[import]
from typer import Option
@@ -17,13 +17,13 @@ def get_gritdir_path() -> Path:
def migrate(
    ctx: typer.Context,
    # Using diff instead of dry-run for backwards compatibility with the old CLI
    diff: bool = Option(
        False,
    diff: bool = Option(  # noqa: FBT001
        False,  # noqa: FBT003
        "--diff",
        help="Show the changes that would be made without applying them.",
    ),
    interactive: bool = Option(
        False,
    interactive: bool = Option(  # noqa: FBT001
        False,  # noqa: FBT003
        "--interactive",
        help="Prompt for confirmation before making each change",
    ),

@@ -54,7 +54,7 @@ def migrate(
        '⚠️ This script is a "best-effort", and is likely to make some '
        "mistakes.\n\n"
        "🛡️ Backup your code prior to running the migration script -- it will "
        "modify your files!\n\n"
        "modify your files!\n\n",
    )
    rich.print("-" * 10)
    rich.print()
@@ -16,12 +16,12 @@ package_cli = typer.Typer(no_args_is_help=True, add_completion=False)
@package_cli.command()
def new(
    name: Annotated[str, typer.Argument(help="The name of the folder to create")],
    with_poetry: Annotated[
    with_poetry: Annotated[  # noqa: FBT002
        bool,
        typer.Option("--with-poetry/--no-poetry", help="Don't run poetry install"),
    ] = False,
) -> None:
    """Creates a new template package."""
    """Create a new template package."""
    computed_name = name if name != "." else Path.cwd().name
    destination_dir = Path.cwd() / name if name != "." else Path.cwd()

@@ -53,8 +53,9 @@ def new(
    pyproject_contents = pyproject.read_text()
    pyproject.write_text(
        pyproject_contents.replace("__package_name__", package_name).replace(
            "__module_name__", module_name
        )
            "__module_name__",
            module_name,
        ),
    )

    # move module folder

@@ -71,23 +72,26 @@ def new(
    readme_contents = readme.read_text()
    readme.write_text(
        readme_contents.replace("__package_name__", package_name).replace(
            "__app_route_code__", app_route_code
        )
            "__app_route_code__",
            app_route_code,
        ),
    )

    # poetry install
    if with_poetry:
        subprocess.run(["poetry", "install"], cwd=destination_dir)
        subprocess.run(["poetry", "install"], cwd=destination_dir)  # noqa: S607


@package_cli.command()
def serve(
    *,
    port: Annotated[
        Optional[int], typer.Option(help="The port to run the server on")
        Optional[int],
        typer.Option(help="The port to run the server on"),
    ] = None,
    host: Annotated[
        Optional[str], typer.Option(help="The host to run the server on")
        Optional[str],
        typer.Option(help="The host to run the server on"),
    ] = None,
    configurable: Annotated[
        Optional[bool],

@@ -104,7 +108,7 @@ def serve(
        ),
    ] = False,
) -> None:
    """Starts a demo app for this template."""
    """Start a demo app for this template."""
    # load pyproject.toml
    project_dir = get_package_root()
    pyproject = project_dir / "pyproject.toml"

@@ -136,7 +140,7 @@ def serve(


@package_cli.command()
def list(contains: Annotated[Optional[str], typer.Argument()] = None) -> None:
def list(contains: Annotated[Optional[str], typer.Argument()] = None) -> None:  # noqa: A001
    """List all or search for available templates."""
    from langchain_cli.utils.github import list_packages
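The `# noqa: A001` above is flake8-builtins: `def list(...)` shadows the builtin `list`. A hedged sketch (not from the commit) of why the name is kept anyway:

```python
# A001 flags functions that shadow a builtin within their scope.
from typing import Optional


def list(contains: Optional[str] = None) -> None:  # noqa: A001  # shadows builtins.list
    print(contains)


# Typer derives the subcommand name from the function name, so this function
# must be called `list` for `langchain template list` to exist; renaming it
# would change the user-facing CLI, hence the suppression rather than a fix.
```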
@@ -20,7 +20,7 @@ def create_events(events: list[EventDict]) -> Optional[Any]:
                "properties": event.get("properties"),
            }
            for event in events
        ]
        ],
    }

    conn = http.client.HTTPSConnection("app.firstpartyhq.com")
@@ -122,7 +122,9 @@ def parse_dependencies(
    api_path: list[str],
) -> list[DependencySource]:
    num_deps = max(
        len(dependencies) if dependencies is not None else 0, len(repo), len(branch)
        len(dependencies) if dependencies is not None else 0,
        len(repo),
        len(branch),
    )
    if (
        (dependencies and len(dependencies) != num_deps)

@@ -143,7 +145,10 @@ def parse_dependencies(
    return [
        parse_dependency_string(iter_dep, iter_repo, iter_branch, iter_api_path)
        for iter_dep, iter_repo, iter_branch, iter_api_path in zip(
            inner_deps, inner_repos, inner_branches, inner_api_paths
            inner_deps,
            inner_repos,
            inner_branches,
            inner_api_paths,
        )
    ]

@@ -186,7 +191,7 @@ def copy_repo(
    source: Path,
    destination: Path,
) -> None:
    """Copies a repo, ignoring git folders.
    """Copy a repo, ignoring git folders.

    Raises FileNotFound error if it can't find source
    """
@@ -13,7 +13,9 @@ def list_packages(*, contains: Optional[str] = None):
    }

    conn.request(
        "GET", "/repos/langchain-ai/langchain/contents/templates", headers=headers
        "GET",
        "/repos/langchain-ai/langchain/contents/templates",
        headers=headers,
    )
    res = conn.getresponse()

@@ -26,6 +26,7 @@ class LangServeExport(TypedDict):
        module: The module to import from, tool.langserve.export_module
        attr: The attribute to import from the module, tool.langserve.export_attr
        package_name: The name of the package, tool.poetry.name

    """

    module: str
@@ -14,7 +14,8 @@ def _get_dep_inline_table(path: Path) -> InlineTable:


def add_dependencies_to_pyproject_toml(
    pyproject_toml: Path, local_editable_dependencies: Iterable[tuple[str, Path]]
    pyproject_toml: Path,
    local_editable_dependencies: Iterable[tuple[str, Path]],
) -> None:
    """Add dependencies to pyproject.toml."""
    with open(pyproject_toml, encoding="utf-8") as f:

@@ -24,14 +25,15 @@ def add_dependencies_to_pyproject_toml(
        {
            name: _get_dep_inline_table(loc.relative_to(pyproject_toml.parent))
            for name, loc in local_editable_dependencies
        }
        },
    )
    with open(pyproject_toml, "w", encoding="utf-8") as f:
        dump(pyproject, f)


def remove_dependencies_from_pyproject_toml(
    pyproject_toml: Path, local_editable_dependencies: Iterable[str]
    pyproject_toml: Path,
    local_editable_dependencies: Iterable[str],
) -> None:
    """Remove dependencies from pyproject.toml."""
    with open(pyproject_toml, encoding="utf-8") as f:
@@ -49,12 +49,45 @@ exclude = [

[tool.ruff.lint]
select = [
    "E", # pycodestyle
    "F", # pyflakes
    "I", # isort
    "T201", # print
    "UP", # pyupgrade
    "A", # flake8-builtins
    "ARG", # flake8-unused-arguments
    "ASYNC", # flake8-async
    "C4", # flake8-comprehensions
    "COM", # flake8-commas
    "D", # pydocstyle
    "DOC", # pydoclint
    "E", # pycodestyle error
    "EM", # flake8-errmsg
    "F", # pyflakes
    "FA", # flake8-future-annotations
    "FBT", # flake8-boolean-trap
    "FLY", # flake8-flynt
    "I", # isort
    "ICN", # flake8-import-conventions
    "INT", # flake8-gettext
    "ISC", # flake8-implicit-str-concat
    "N", # pep8-naming
    "PT", # flake8-pytest-style
    "PGH", # pygrep-hooks
    "PIE", # flake8-pie
    "PERF", # perflint
    "PYI", # flake8-pyi
    "Q", # flake8-quotes
    "RET", # flake8-return
    "RSE", # flake8-raise
    "RUF", # ruff
    "S", # flake8-bandit
    "SLF", # flake8-self
    "SLOT", # flake8-slots
    "SIM", # flake8-simplify
    "T10", # flake8-debugger
    "T20", # flake8-print
    "TID", # flake8-tidy-imports
    "UP", # pyupgrade
    "W", # pycodestyle warning
    "YTT", # flake8-2020
]
ignore = ["D100", "D101", "D102", "D103", "D104", "D105", "D107"]
pyupgrade.keep-runtime-typing = true

[tool.mypy]
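Two of the newly selected families explain the most common mechanical edits in this commit: COM (flake8-commas) adds the trailing commas throughout, and EM (flake8-errmsg) drives the recurring `msg = ...; raise ValueError(msg)` rewrite. A short illustrative sketch of the EM convention (example code only, not from the diff):

```python
# The flake8-errmsg (EM) pattern used throughout this commit.
def bad(n: int) -> None:
    if n < 0:
        raise ValueError(f"n must be non-negative, got {n}")  # EM102: f-string in raise


def good(n: int) -> None:
    if n < 0:
        msg = f"n must be non-negative, got {n}"  # bind the message first...
        raise ValueError(msg)  # ...so tracebacks show a clean raise expression
```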
@@ -1,4 +1,3 @@
# type: ignore
"""Script to generate migrations for the migration script."""

import json

@@ -45,12 +44,17 @@ def cli() -> None:
)
@click.option(
    "--format",
    "format_",
    type=click.Choice(["json", "grit"], case_sensitive=False),
    default="json",
    help="The output format for the migration script (json or grit).",
)
def generic(
    pkg1: str, pkg2: str, output: str, filter_by_all: bool, format: str
    pkg1: str,
    pkg2: str,
    output: str,
    filter_by_all: bool,  # noqa: FBT001
    format_: str,
) -> None:
    """Generate a migration script."""
    click.echo("Migration script generated.")

@@ -62,9 +66,9 @@ def generic(
    name = f"{pkg1}_to_{pkg2}"

    if output is None:
        output = f"{name}.json" if format == "json" else f"{name}.grit"
        output = f"{name}.json" if format_ == "json" else f"{name}.grit"

    if format == "json":
    if format_ == "json":
        dumped = json.dumps(migrations, indent=2, sort_keys=True)
    else:
        dumped = dump_migrations_as_grit(name, migrations)
@@ -6,7 +6,7 @@ class File:
        self.name = name
        self.content = "\n".join(content or [])

    def __eq__(self, __value: object) -> bool:
    def __eq__(self, __value: object, /) -> bool:
        if not isinstance(__value, File):
            return NotImplemented

@@ -34,12 +34,12 @@ class Folder:
                files.append(cls.from_structure(path))
            else:
                files.append(
                    File(path.name, path.read_text(encoding="utf-8").splitlines())
                    File(path.name, path.read_text(encoding="utf-8").splitlines()),
                )

        return Folder(name, *files)

    def __eq__(self, __value: object) -> bool:
    def __eq__(self, __value: object, /) -> bool:
        if isinstance(__value, File):
            return False
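The trailing `/` added to `__eq__` above makes the parameter positional-only, which appears aimed at matching `object.__eq__`'s own positional-only signature. A minimal sketch under that assumption (example class is hypothetical):

```python
# The `/` marker: everything before it can only be passed positionally.
class Point:
    def __init__(self, x: int) -> None:
        self.x = x

    def __eq__(self, other: object, /) -> bool:
        if not isinstance(other, Point):
            return NotImplemented
        return self.x == other.x


print(Point(1) == Point(1))  # True
# Point(1).__eq__(other=Point(1)) would now raise TypeError, matching object.__eq__.
```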
@@ -34,7 +34,7 @@ def find_issue(current: Folder, expected: Folder) -> str:
                    expected_file.content.splitlines(),
                    fromfile=current_file.name,
                    tofile=expected_file.name,
                )
                ),
            )
    return "Unknown"

@@ -47,8 +47,10 @@ def test_command_line(tmp_path: Path) -> None:
        before.create_structure(root=Path(td))
        # The input is used to force through the confirmation.
        result = runner.invoke(app, ["migrate", before.name, "--force"])
        assert result.exit_code == 0, result.output
        if result.exit_code != 0:
            raise RuntimeError(result.output)

        after = Folder.from_structure(Path(td) / before.name)

        assert after == expected, find_issue(after, expected)
        if after != expected:
            raise ValueError(find_issue(after, expected))
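The assert-to-raise rewrites above (and in the test hunks that follow) are consistent with ruff's S101, which flags bare `assert` because assertions vanish under `python -O`. A hedged sketch of the pattern, not from the commit:

```python
# Equivalent shape of the rewrite applied to these tests.
def check_exit_code(exit_code: int, output: str) -> None:
    # Before: assert exit_code == 0, output
    if exit_code != 0:
        raise RuntimeError(output)  # survives -O and raises a typed error
```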
@@ -12,14 +12,15 @@ def test_create_json_agent_migration() -> None:
    """Test the migration of create_json_agent from langchain to langchain_community."""
    with sup1(), sup2():
        raw_migrations = generate_simplified_migrations(
            from_package="langchain", to_package="langchain_community"
            from_package="langchain",
            to_package="langchain_community",
        )
        json_agent_migrations = [
            migration
            for migration in raw_migrations
            if "create_json_agent" in migration[0]
        ]
        assert json_agent_migrations == [
        if json_agent_migrations != [
            (
                "langchain.agents.create_json_agent",
                "langchain_community.agent_toolkits.create_json_agent",

@@ -32,7 +33,9 @@ def test_create_json_agent_migration() -> None:
                "langchain.agents.agent_toolkits.json.base.create_json_agent",
                "langchain_community.agent_toolkits.create_json_agent",
            ),
        ]
        ]:
            msg = "json_agent_migrations did not match the expected value"
            raise ValueError(msg)


@pytest.mark.xfail(reason="Unknown reason")

@@ -40,15 +43,21 @@ def test_create_single_store_retriever_db() -> None:
    """Test migration from langchain to langchain_core."""
    with sup1(), sup2():
        raw_migrations = generate_simplified_migrations(
            from_package="langchain", to_package="langchain_core"
            from_package="langchain",
            to_package="langchain_core",
        )
        # SingleStore was an old name for VectorStoreRetriever
        single_store_migration = [
            migration for migration in raw_migrations if "SingleStore" in migration[0]
        ]
        assert single_store_migration == [
        if single_store_migration != [
            (
                "langchain.vectorstores.singlestoredb.SingleStoreDBRetriever",
                "langchain_core.vectorstores.VectorStoreRetriever",
            ),
        ]
        ]:
            msg = (
                "Unexpected migration: single_store_migration does not match expected "
                "value"
            )
            raise ValueError(msg)
@@ -9,7 +9,7 @@ pytest.importorskip(modname="langchain_openai")


def test_generate_migrations() -> None:
    migrations = get_migrations_for_partner_package("langchain_openai")
    assert migrations == [
    if migrations != [
        ("langchain_community.llms.openai.OpenAI", "langchain_openai.OpenAI"),
        ("langchain_community.llms.openai.AzureOpenAI", "langchain_openai.AzureOpenAI"),
        (

@@ -43,4 +43,6 @@ def test_generate_migrations() -> None:
            "langchain_openai.AzureChatOpenAI",
        ),
        ("langchain_community.chat_models.ChatOpenAI", "langchain_openai.ChatOpenAI"),
    ]
    ]:
        msg = "Migrations do not match expected result"
        raise ValueError(msg)

@@ -2,4 +2,6 @@ from langchain_cli.namespaces.migrate.generate.utils import PKGS_ROOT


def test_root() -> None:
    assert PKGS_ROOT.name == "libs"
    if PKGS_ROOT.name != "libs":
        msg = "Expected PKGS_ROOT.name to be 'libs'."
        raise ValueError(msg)
@@ -5,6 +5,7 @@ from langchain_cli.utils.events import EventDict, create_events

@pytest.mark.xfail(reason="Unknown reason")
def test_create_events() -> None:
    assert create_events(
        [EventDict(event="Test Event", properties={"test": "test"})]
    ) == {"status": "success"}
    result = create_events([EventDict(event="Test Event", properties={"test": "test"})])
    if result != {"status": "success"}:
        msg = "Expected {'status': 'success'}, got " + repr(result)
        raise ValueError(msg)
@@ -18,17 +18,37 @@ def _assert_dependency_equals(
    subdirectory: Optional[str] = None,
    event_metadata: Optional[dict] = None,
) -> None:
    assert dep["git"] == git
    assert dep["ref"] == ref
    assert dep["subdirectory"] == subdirectory
    if event_metadata is not None:
        assert dep["event_metadata"] == event_metadata
    if dep["git"] != git:
        msg = f"Expected git to be {git} but got {dep['git']}"
        raise ValueError(msg)
    if dep["ref"] != ref:
        msg = f"Expected ref to be {ref} but got {dep['ref']}"
        raise ValueError(msg)
    if dep["subdirectory"] != subdirectory:
        msg = (
            f"Expected subdirectory to be {subdirectory} but got {dep['subdirectory']}"
        )
        raise ValueError(msg)
    if dep["subdirectory"] != subdirectory:
        msg = (
            f"Expected subdirectory to be {subdirectory} but got {dep['subdirectory']}"
        )
        raise ValueError(msg)
    if event_metadata is not None and dep["event_metadata"] != event_metadata:
        msg = (
            f"Expected event_metadata to be {event_metadata} "
            f"but got {dep['event_metadata']}"
        )
        raise ValueError(msg)


def test_dependency_string() -> None:
    _assert_dependency_equals(
        parse_dependency_string(
            "git+ssh://git@github.com/efriis/myrepo.git", None, None, None
            "git+ssh://git@github.com/efriis/myrepo.git",
            None,
            None,
            None,
        ),
        git="ssh://git@github.com/efriis/myrepo.git",
        ref=None,

@@ -49,7 +69,10 @@ def test_dependency_string() -> None:

    _assert_dependency_equals(
        parse_dependency_string(
            "git+ssh://git@github.com:efriis/myrepo.git#develop", None, None, None
            "git+ssh://git@github.com:efriis/myrepo.git#develop",
            None,
            None,
            None,
        ),
        git="ssh://git@github.com:efriis/myrepo.git",
        ref="develop",

@@ -59,7 +82,10 @@ def test_dependency_string() -> None:
    # also support a slash in ssh
    _assert_dependency_equals(
        parse_dependency_string(
            "git+ssh://git@github.com/efriis/myrepo.git#develop", None, None, None
            "git+ssh://git@github.com/efriis/myrepo.git#develop",
            None,
            None,
            None,
        ),
        git="ssh://git@github.com/efriis/myrepo.git",
        ref="develop",

@@ -69,7 +95,10 @@ def test_dependency_string() -> None:
    # looks like poetry supports both an @ and a #
    _assert_dependency_equals(
        parse_dependency_string(
            "git+ssh://git@github.com:efriis/myrepo.git@develop", None, None, None
            "git+ssh://git@github.com:efriis/myrepo.git@develop",
            None,
            None,
            None,
        ),
        git="ssh://git@github.com:efriis/myrepo.git",
        ref="develop",

@@ -100,7 +129,8 @@ def test_dependency_string_both() -> None:

def test_dependency_string_invalids() -> None:
    # expect error for wrong order
    with pytest.raises(ValueError):
    # Bypassing validation since the ValueError message is dynamic
    with pytest.raises(ValueError):  # noqa: PT011
        parse_dependency_string(
            "git+https://github.com/efriis/myrepo.git#subdirectory=src@branch",
            None,
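The `# noqa: PT011` above is flake8-pytest-style asking for a `match=` argument so `pytest.raises(ValueError)` doesn't pass on an unrelated ValueError. A sketch of the preferred form (hypothetical example, not from the commit):

```python
# PT011: pin the expected message when the exception type alone is too broad.
import pytest


def parse_positive(s: str) -> int:
    n = int(s)
    if n <= 0:
        msg = f"expected a positive integer, got {n}"
        raise ValueError(msg)
    return n


def test_parse_positive_rejects_zero() -> None:
    with pytest.raises(ValueError, match="positive integer"):
        parse_positive("0")
# The CLI test suppresses PT011 instead because its error message is dynamic.
```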
@@ -26,9 +26,7 @@ readme = "README.md"
repository = "https://github.com/langchain-ai/langchain"

[dependency-groups]
lint = [
    "ruff<0.13.0,>=0.12.2",
]
lint = ["ruff<0.13,>=0.12.2"]
typing = [
    "mypy<1.16,>=1.15",
    "types-pyyaml<7.0.0.0,>=6.0.12.2",
@@ -28,11 +28,11 @@ def _warn_on_import(name: str, replacement: Optional[str] = None) -> None:
    if replacement:
        warnings.warn(
            f"Importing {name} from langchain root module is no longer supported. "
            f"Please use {replacement} instead."
            f"Please use {replacement} instead.",
        )
    else:
        warnings.warn(
            f"Importing {name} from langchain root module is no longer supported."
            f"Importing {name} from langchain root module is no longer supported.",
        )

@@ -170,7 +170,8 @@ def __getattr__(name: str) -> Any:
        from langchain_community.llms import HuggingFaceTextGenInference

        _warn_on_import(
            name, replacement="langchain_community.llms.HuggingFaceTextGenInference"
            name,
            replacement="langchain_community.llms.HuggingFaceTextGenInference",
        )

        return HuggingFaceTextGenInference

@@ -235,7 +236,8 @@ def __getattr__(name: str) -> Any:
        from langchain_core.prompts import FewShotPromptTemplate

        _warn_on_import(
            name, replacement="langchain_core.prompts.FewShotPromptTemplate"
            name,
            replacement="langchain_core.prompts.FewShotPromptTemplate",
        )

        return FewShotPromptTemplate

@@ -263,7 +265,8 @@ def __getattr__(name: str) -> Any:
        from langchain_community.utilities import ArxivAPIWrapper

        _warn_on_import(
            name, replacement="langchain_community.utilities.ArxivAPIWrapper"
            name,
            replacement="langchain_community.utilities.ArxivAPIWrapper",
        )

        return ArxivAPIWrapper

@@ -271,7 +274,8 @@ def __getattr__(name: str) -> Any:
        from langchain_community.utilities import GoldenQueryAPIWrapper

        _warn_on_import(
            name, replacement="langchain_community.utilities.GoldenQueryAPIWrapper"
            name,
            replacement="langchain_community.utilities.GoldenQueryAPIWrapper",
        )

        return GoldenQueryAPIWrapper

@@ -279,7 +283,8 @@ def __getattr__(name: str) -> Any:
        from langchain_community.utilities import GoogleSearchAPIWrapper

        _warn_on_import(
            name, replacement="langchain_community.utilities.GoogleSearchAPIWrapper"
            name,
            replacement="langchain_community.utilities.GoogleSearchAPIWrapper",
        )

        return GoogleSearchAPIWrapper

@@ -287,7 +292,8 @@ def __getattr__(name: str) -> Any:
        from langchain_community.utilities import GoogleSerperAPIWrapper

        _warn_on_import(
            name, replacement="langchain_community.utilities.GoogleSerperAPIWrapper"
            name,
            replacement="langchain_community.utilities.GoogleSerperAPIWrapper",
        )

        return GoogleSerperAPIWrapper

@@ -295,7 +301,8 @@ def __getattr__(name: str) -> Any:
        from langchain_community.utilities import PowerBIDataset

        _warn_on_import(
            name, replacement="langchain_community.utilities.PowerBIDataset"
            name,
            replacement="langchain_community.utilities.PowerBIDataset",
        )

        return PowerBIDataset

@@ -303,7 +310,8 @@ def __getattr__(name: str) -> Any:
        from langchain_community.utilities import SearxSearchWrapper

        _warn_on_import(
            name, replacement="langchain_community.utilities.SearxSearchWrapper"
            name,
            replacement="langchain_community.utilities.SearxSearchWrapper",
        )

        return SearxSearchWrapper

@@ -311,7 +319,8 @@ def __getattr__(name: str) -> Any:
        from langchain_community.utilities import WikipediaAPIWrapper

        _warn_on_import(
            name, replacement="langchain_community.utilities.WikipediaAPIWrapper"
            name,
            replacement="langchain_community.utilities.WikipediaAPIWrapper",
        )

        return WikipediaAPIWrapper

@@ -319,7 +328,8 @@ def __getattr__(name: str) -> Any:
        from langchain_community.utilities import WolframAlphaAPIWrapper

        _warn_on_import(
            name, replacement="langchain_community.utilities.WolframAlphaAPIWrapper"
            name,
            replacement="langchain_community.utilities.WolframAlphaAPIWrapper",
        )

        return WolframAlphaAPIWrapper

@@ -339,7 +349,8 @@ def __getattr__(name: str) -> Any:
        from langchain_community.vectorstores import ElasticVectorSearch

        _warn_on_import(
            name, replacement="langchain_community.vectorstores.ElasticVectorSearch"
            name,
            replacement="langchain_community.vectorstores.ElasticVectorSearch",
        )

        return ElasticVectorSearch

@@ -348,7 +359,8 @@ def __getattr__(name: str) -> Any:
        from langchain_community.utilities import SerpAPIWrapper

        _warn_on_import(
            name, replacement="langchain_community.utilities.SerpAPIWrapper"
            name,
            replacement="langchain_community.utilities.SerpAPIWrapper",
        )

        return SerpAPIWrapper
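These hunks all reformat calls inside a module-level `__getattr__`, the lazy-import deprecation shim that `langchain/__init__.py` uses. A condensed sketch of the pattern under an illustrative mapping (the `_MOVED` dict and its entry are hypothetical, not the library's actual table):

```python
# Module-level __getattr__ (PEP 562): attribute access on the package resolves
# lazily and emits a warning pointing at the attribute's new home.
import importlib
import warnings
from typing import Any

_MOVED = {"OpenAI": "langchain_community.llms"}  # illustrative mapping only


def __getattr__(name: str) -> Any:
    if name in _MOVED:
        module = importlib.import_module(_MOVED[name])
        warnings.warn(
            f"Importing {name} from the root module is no longer supported. "
            f"Please use {_MOVED[name]}.{name} instead.",
            stacklevel=2,
        )
        return getattr(module, name)
    msg = f"module {__name__!r} has no attribute {name!r}"
    raise AttributeError(msg)
```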
@@ -122,7 +122,9 @@ def __getattr__(name: str) -> Any:
        # Get directory of langchain package
        HERE = Path(__file__).parents[1]
        relative_path = as_import_path(
            Path(__file__).parent, suffix=name, relative_to=HERE
            Path(__file__).parent,
            suffix=name,
            relative_to=HERE,
        )
        old_path = "langchain." + relative_path
        new_path = "langchain_experimental." + relative_path
@@ -135,7 +135,8 @@ class BaseSingleActionAgent(BaseModel):
        if early_stopping_method == "force":
            # `force` just returns a constant string
            return AgentFinish(
                {"output": "Agent stopped due to iteration limit or time limit."}, ""
                {"output": "Agent stopped due to iteration limit or time limit."},
                "",
            )
        msg = f"Got unsupported early_stopping_method `{early_stopping_method}`"
        raise ValueError(msg)

@@ -375,7 +376,7 @@ class AgentOutputParser(BaseOutputParser[Union[AgentAction, AgentFinish]]):


class MultiActionAgentOutputParser(
    BaseOutputParser[Union[list[AgentAction], AgentFinish]]
    BaseOutputParser[Union[list[AgentAction], AgentFinish]],
):
    """Base class for parsing agent output into agent actions/finish.

@@ -491,7 +492,8 @@ class RunnableAgent(BaseSingleActionAgent):
            # Because the response from the plan is not a generator, we need to
            # accumulate the output into final output and return that.
            async for chunk in self.runnable.astream(
                inputs, config={"callbacks": callbacks}
                inputs,
                config={"callbacks": callbacks},
            ):
                if final_output is None:
                    final_output = chunk

@@ -499,7 +501,8 @@ class RunnableAgent(BaseSingleActionAgent):
                    final_output += chunk
        else:
            final_output = await self.runnable.ainvoke(
                inputs, config={"callbacks": callbacks}
                inputs,
                config={"callbacks": callbacks},
            )
        return final_output

@@ -607,7 +610,8 @@ class RunnableMultiActionAgent(BaseMultiActionAgent):
            # Because the response from the plan is not a generator, we need to
            # accumulate the output into final output and return that.
            async for chunk in self.runnable.astream(
                inputs, config={"callbacks": callbacks}
                inputs,
                config={"callbacks": callbacks},
            ):
                if final_output is None:
                    final_output = chunk

@@ -615,7 +619,8 @@ class RunnableMultiActionAgent(BaseMultiActionAgent):
                    final_output += chunk
        else:
            final_output = await self.runnable.ainvoke(
                inputs, config={"callbacks": callbacks}
                inputs,
                config={"callbacks": callbacks},
            )

        return final_output

@@ -764,7 +769,8 @@ class Agent(BaseSingleActionAgent):
        ]

    def _construct_scratchpad(
        self, intermediate_steps: list[tuple[AgentAction, str]]
        self,
        intermediate_steps: list[tuple[AgentAction, str]],
    ) -> Union[str, list[BaseMessage]]:
        """Construct the scratchpad that lets the agent continue its thought process."""
        thoughts = ""

@@ -816,7 +822,9 @@ class Agent(BaseSingleActionAgent):
        return await self.output_parser.aparse(full_output)

    def get_full_inputs(
        self, intermediate_steps: list[tuple[AgentAction, str]], **kwargs: Any
        self,
        intermediate_steps: list[tuple[AgentAction, str]],
        **kwargs: Any,
    ) -> builtins.dict[str, Any]:
        """Create the full inputs for the LLMChain from intermediate steps.

@@ -858,7 +866,7 @@ class Agent(BaseSingleActionAgent):
        if "agent_scratchpad" not in prompt.input_variables:
            logger.warning(
                "`agent_scratchpad` should be a variable in prompt.input_variables."
                " Did not find it, so adding it at the end."
                " Did not find it, so adding it at the end.",
            )
            prompt.input_variables.append("agent_scratchpad")
        if isinstance(prompt, PromptTemplate):

@@ -964,7 +972,8 @@ class Agent(BaseSingleActionAgent):
        if early_stopping_method == "force":
            # `force` just returns a constant string
            return AgentFinish(
                {"output": "Agent stopped due to iteration limit or time limit."}, ""
                {"output": "Agent stopped due to iteration limit or time limit."},
                "",
            )
        if early_stopping_method == "generate":
            # Generate does one final forward pass

@@ -1072,7 +1081,8 @@ class AgentExecutor(Chain):
    as an observation.
    """
    trim_intermediate_steps: Union[
        int, Callable[[list[tuple[AgentAction, str]]], list[tuple[AgentAction, str]]]
        int,
        Callable[[list[tuple[AgentAction, str]]], list[tuple[AgentAction, str]]],
    ] = -1
    """How to trim the intermediate steps before returning them.
    Defaults to -1, which means no trimming.

@@ -1153,11 +1163,13 @@ class AgentExecutor(Chain):
            stream_runnable = values.pop("stream_runnable", True)
            if multi_action:
                values["agent"] = RunnableMultiActionAgent(
                    runnable=agent, stream_runnable=stream_runnable
                    runnable=agent,
                    stream_runnable=stream_runnable,
                )
            else:
                values["agent"] = RunnableAgent(
                    runnable=agent, stream_runnable=stream_runnable
                    runnable=agent,
                    stream_runnable=stream_runnable,
                )
        return values

@@ -1281,7 +1293,9 @@ class AgentExecutor(Chain):
    ) -> dict[str, Any]:
        if run_manager:
            await run_manager.on_agent_finish(
                output, color="green", verbose=self.verbose
                output,
                color="green",
                verbose=self.verbose,
            )
        final_output = output.return_values
        if self.return_intermediate_steps:

@@ -1289,7 +1303,8 @@ class AgentExecutor(Chain):
        return final_output

    def _consume_next_step(
        self, values: NextStepOutput
        self,
        values: NextStepOutput,
    ) -> Union[AgentFinish, list[tuple[AgentAction, str]]]:
        if isinstance(values[-1], AgentFinish):
            if len(values) != 1:

@@ -1314,8 +1329,8 @@ class AgentExecutor(Chain):
                    inputs,
                    intermediate_steps,
                    run_manager,
                )
            )
                ),
            ),
        )

    def _iter_next_step(

@@ -1391,7 +1406,10 @@ class AgentExecutor(Chain):
                yield agent_action
        for agent_action in actions:
            yield self._perform_agent_action(
                name_to_tool_map, color_mapping, agent_action, run_manager
                name_to_tool_map,
                color_mapping,
                agent_action,
                run_manager,
            )

    def _perform_agent_action(

@@ -1451,7 +1469,7 @@ class AgentExecutor(Chain):
                    intermediate_steps,
                    run_manager,
                )
            ]
            ],
        )

    async def _aiter_next_step(

@@ -1528,7 +1546,10 @@ class AgentExecutor(Chain):
        result = await asyncio.gather(
            *[
                self._aperform_agent_action(
                    name_to_tool_map, color_mapping, agent_action, run_manager
                    name_to_tool_map,
                    color_mapping,
                    agent_action,
                    run_manager,
                )
                for agent_action in actions
            ],

@@ -1547,7 +1568,9 @@ class AgentExecutor(Chain):
    ) -> AgentStep:
        if run_manager:
            await run_manager.on_agent_action(
                agent_action, verbose=self.verbose, color="green"
                agent_action,
                verbose=self.verbose,
                color="green",
            )
        # Otherwise we lookup the tool
        if agent_action.tool in name_to_tool_map:

@@ -1589,7 +1612,8 @@ class AgentExecutor(Chain):
        name_to_tool_map = {tool.name: tool for tool in self.tools}
        # We construct a mapping from each tool to a color, used for logging.
        color_mapping = get_color_mapping(
            [tool.name for tool in self.tools], excluded_colors=["green", "red"]
            [tool.name for tool in self.tools],
            excluded_colors=["green", "red"],
        )
        intermediate_steps: list[tuple[AgentAction, str]] = []
        # Let's start tracking the number of iterations and time elapsed

@@ -1607,7 +1631,9 @@ class AgentExecutor(Chain):
            )
            if isinstance(next_step_output, AgentFinish):
                return self._return(
                    next_step_output, intermediate_steps, run_manager=run_manager
                    next_step_output,
                    intermediate_steps,
                    run_manager=run_manager,
                )

            intermediate_steps.extend(next_step_output)

@@ -1617,12 +1643,16 @@ class AgentExecutor(Chain):
                tool_return = self._get_tool_return(next_step_action)
                if tool_return is not None:
                    return self._return(
                        tool_return, intermediate_steps, run_manager=run_manager
                        tool_return,
                        intermediate_steps,
                        run_manager=run_manager,
                    )
            iterations += 1
            time_elapsed = time.time() - start_time
        output = self._action_agent.return_stopped_response(
            self.early_stopping_method, intermediate_steps, **inputs
            self.early_stopping_method,
            intermediate_steps,
            **inputs,
        )
        return self._return(output, intermediate_steps, run_manager=run_manager)

@@ -1636,7 +1666,8 @@ class AgentExecutor(Chain):
        name_to_tool_map = {tool.name: tool for tool in self.tools}
        # We construct a mapping from each tool to a color, used for logging.
        color_mapping = get_color_mapping(
            [tool.name for tool in self.tools], excluded_colors=["green"]
            [tool.name for tool in self.tools],
            excluded_colors=["green"],
        )
        intermediate_steps: list[tuple[AgentAction, str]] = []
        # Let's start tracking the number of iterations and time elapsed

@@ -1668,28 +1699,39 @@ class AgentExecutor(Chain):
                    tool_return = self._get_tool_return(next_step_action)
                    if tool_return is not None:
                        return await self._areturn(
                            tool_return, intermediate_steps, run_manager=run_manager
                            tool_return,
                            intermediate_steps,
                            run_manager=run_manager,
                        )

                iterations += 1
                time_elapsed = time.time() - start_time
            output = self._action_agent.return_stopped_response(
                self.early_stopping_method, intermediate_steps, **inputs
                self.early_stopping_method,
                intermediate_steps,
                **inputs,
            )
            return await self._areturn(
                output, intermediate_steps, run_manager=run_manager
                output,
                intermediate_steps,
                run_manager=run_manager,
            )
        except (TimeoutError, asyncio.TimeoutError):
            # stop early when interrupted by the async timeout
            output = self._action_agent.return_stopped_response(
                self.early_stopping_method, intermediate_steps, **inputs
                self.early_stopping_method,
                intermediate_steps,
                **inputs,
            )
            return await self._areturn(
                output, intermediate_steps, run_manager=run_manager
                output,
                intermediate_steps,
                run_manager=run_manager,
            )

    def _get_tool_return(
        self, next_step_output: tuple[AgentAction, str]
        self,
        next_step_output: tuple[AgentAction, str],
    ) -> Optional[AgentFinish]:
        """Check if the tool is a returning tool."""
        agent_action, observation = next_step_output

@@ -1709,7 +1751,8 @@ class AgentExecutor(Chain):
        return None

    def _prepare_intermediate_steps(
        self, intermediate_steps: list[tuple[AgentAction, str]]
        self,
        intermediate_steps: list[tuple[AgentAction, str]],
    ) -> list[tuple[AgentAction, str]]:
        if (
            isinstance(self.trim_intermediate_steps, int)
@@ -147,7 +147,7 @@ class AgentExecutorIterator:
        self.iterations += 1
        self.time_elapsed = time.time() - self.start_time
        logger.debug(
            f"Agent Iterations: {self.iterations} ({self.time_elapsed:.2f}s elapsed)"
            f"Agent Iterations: {self.iterations} ({self.time_elapsed:.2f}s elapsed)",
        )

    def make_final_outputs(

@@ -160,8 +160,10 @@ class AgentExecutorIterator:

        prepared_outputs = AddableDict(
            self.agent_executor.prep_outputs(
                self.inputs, outputs, return_only_outputs=True
            )
                self.inputs,
                outputs,
                return_only_outputs=True,
            ),
        )
        if self.include_run_info:
            prepared_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)

@@ -187,7 +189,8 @@ class AgentExecutorIterator:
        )
        try:
            while self.agent_executor._should_continue(
                self.iterations, self.time_elapsed
                self.iterations,
                self.time_elapsed,
            ):
                # take the next step: this plans next action, executes it,
                # yielding action and observation as they are generated

@@ -254,7 +257,8 @@ class AgentExecutorIterator:
        try:
            async with asyncio_timeout(self.agent_executor.max_execution_time):
                while self.agent_executor._should_continue(
                    self.iterations, self.time_elapsed
                    self.iterations,
                    self.time_elapsed,
                ):
                    # take the next step: this plans next action, executes it,
                    # yielding action and observation as they are generated

@@ -272,11 +276,13 @@ class AgentExecutorIterator:
                    if self.yield_actions:
                        if isinstance(chunk, AgentAction):
                            yield AddableDict(
                                actions=[chunk], messages=chunk.messages
                                actions=[chunk],
                                messages=chunk.messages,
                            )
                        elif isinstance(chunk, AgentStep):
                            yield AddableDict(
                                steps=[chunk], messages=chunk.messages
                                steps=[chunk],
                                messages=chunk.messages,
                            )

                # convert iterator output to format handled by _process_next_step

@@ -285,7 +291,8 @@ class AgentExecutorIterator:
                self.update_iterations()
                # decide if this is the final output
                output = await self._aprocess_next_step_output(
                    next_step, run_manager
                    next_step,
                    run_manager,
                )
                is_final = "intermediate_step" not in output
                # yield the final output always

@@ -317,7 +324,7 @@ class AgentExecutorIterator:
        logger.debug("Processing output of Agent loop step")
        if isinstance(next_step_output, AgentFinish):
            logger.debug(
                "Hit AgentFinish: _return -> on_chain_end -> run final output logic"
                "Hit AgentFinish: _return -> on_chain_end -> run final output logic",
            )
            return self._return(next_step_output, run_manager=run_manager)

@@ -345,7 +352,7 @@ class AgentExecutorIterator:
        logger.debug("Processing output of async Agent loop step")
        if isinstance(next_step_output, AgentFinish):
            logger.debug(
                "Hit AgentFinish: _areturn -> on_chain_end -> run final output logic"
                "Hit AgentFinish: _areturn -> on_chain_end -> run final output logic",
            )
            return await self._areturn(next_step_output, run_manager=run_manager)

@@ -388,26 +395,34 @@ class AgentExecutorIterator:
        return await self._areturn(output, run_manager=run_manager)

    def _return(
        self, output: AgentFinish, run_manager: CallbackManagerForChainRun
        self,
        output: AgentFinish,
        run_manager: CallbackManagerForChainRun,
    ) -> AddableDict:
        """
        Return the final output of the iterator.
        """
        returned_output = self.agent_executor._return(
            output, self.intermediate_steps, run_manager=run_manager
            output,
            self.intermediate_steps,
            run_manager=run_manager,
        )
        returned_output["messages"] = output.messages
        run_manager.on_chain_end(returned_output)
        return self.make_final_outputs(returned_output, run_manager)

    async def _areturn(
        self, output: AgentFinish, run_manager: AsyncCallbackManagerForChainRun
        self,
        output: AgentFinish,
        run_manager: AsyncCallbackManagerForChainRun,
    ) -> AddableDict:
        """
        Return the final output of the async iterator.
        """
        returned_output = await self.agent_executor._areturn(
            output, self.intermediate_steps, run_manager=run_manager
            output,
            self.intermediate_steps,
            run_manager=run_manager,
        )
        returned_output["messages"] = output.messages
        await run_manager.on_chain_end(returned_output)
@ -9,7 +9,7 @@ if TYPE_CHECKING:
|
||||
# Used to consolidate logic for raising deprecation warnings and
|
||||
# handling optional imports.
|
||||
DEPRECATED_LOOKUP = {
|
||||
"AINetworkToolkit": "langchain_community.agent_toolkits.ainetwork.toolkit"
|
||||
"AINetworkToolkit": "langchain_community.agent_toolkits.ainetwork.toolkit",
|
||||
}
|
||||
|
||||
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
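These one-line DEPRECATED_LOOKUP hunks only add a trailing comma, but the table itself is what drives the lazy re-export: create_importer builds a module-level __getattr__ from it. A simplified sketch of that mechanism, assuming only the mapping shown in the diff (the real helper also emits a deprecation warning and handles optional imports):

from importlib import import_module
from typing import Any

DEPRECATED_LOOKUP = {
    "AINetworkToolkit": "langchain_community.agent_toolkits.ainetwork.toolkit",
}

def __getattr__(name: str) -> Any:
    # Lazily import deprecated names from their new home so old-style
    # imports from this package keep working.
    if name in DEPRECATED_LOOKUP:
        module = import_module(DEPRECATED_LOOKUP[name])
        return getattr(module, name)
    msg = f"module {__name__!r} has no attribute {name!r}"
    raise AttributeError(msg)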
@ -9,7 +9,7 @@ if TYPE_CHECKING:
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"AmadeusToolkit": "langchain_community.agent_toolkits.amadeus.toolkit"
"AmadeusToolkit": "langchain_community.agent_toolkits.amadeus.toolkit",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)

@ -13,7 +13,7 @@ if TYPE_CHECKING:
DEPRECATED_LOOKUP = {
"AzureCognitiveServicesToolkit": (
"langchain_community.agent_toolkits.azure_cognitive_services"
)
),
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)

@ -9,7 +9,7 @@ if TYPE_CHECKING:
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ClickupToolkit": "langchain_community.agent_toolkits.clickup.toolkit"
"ClickupToolkit": "langchain_community.agent_toolkits.clickup.toolkit",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)

@ -20,7 +20,7 @@ def _get_default_system_message() -> SystemMessage:
"Do your best to answer the questions. "
"Feel free to use any tools available to look up "
"relevant information, only if necessary"
)
),
)

@ -58,7 +58,9 @@ def create_conversational_retrieval_agent(

if remember_intermediate_steps:
memory: BaseMemory = AgentTokenBufferMemory(
memory_key=memory_key, llm=llm, max_token_limit=max_token_limit
memory_key=memory_key,
llm=llm,
max_token_limit=max_token_limit,
)
else:
memory = ConversationTokenBufferMemory(

@ -15,7 +15,7 @@ if TYPE_CHECKING:
DEPRECATED_LOOKUP = {
"FileManagementToolkit": (
"langchain_community.agent_toolkits.file_management.toolkit"
)
),
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)

@ -13,7 +13,7 @@ if TYPE_CHECKING:
DEPRECATED_LOOKUP = {
"FileManagementToolkit": (
"langchain_community.agent_toolkits.file_management.toolkit"
)
),
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)

@ -9,7 +9,7 @@ if TYPE_CHECKING:
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GitLabToolkit": "langchain_community.agent_toolkits.gitlab.toolkit"
"GitLabToolkit": "langchain_community.agent_toolkits.gitlab.toolkit",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)

@ -9,7 +9,7 @@ if TYPE_CHECKING:
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"create_json_agent": "langchain_community.agent_toolkits.json.base"
"create_json_agent": "langchain_community.agent_toolkits.json.base",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)

@ -9,7 +9,7 @@ if TYPE_CHECKING:
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"MultionToolkit": "langchain_community.agent_toolkits.multion.toolkit"
"MultionToolkit": "langchain_community.agent_toolkits.multion.toolkit",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)

@ -9,7 +9,7 @@ if TYPE_CHECKING:
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"O365Toolkit": "langchain_community.agent_toolkits.office365.toolkit"
"O365Toolkit": "langchain_community.agent_toolkits.office365.toolkit",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)

@ -9,7 +9,7 @@ if TYPE_CHECKING:
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"create_openapi_agent": "langchain_community.agent_toolkits.openapi.base"
"create_openapi_agent": "langchain_community.agent_toolkits.openapi.base",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)

@ -13,7 +13,7 @@ if TYPE_CHECKING:
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"PlayWrightBrowserToolkit": "langchain_community.agent_toolkits.playwright.toolkit"
"PlayWrightBrowserToolkit": "langchain_community.agent_toolkits.playwright.toolkit",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)

@ -11,7 +11,7 @@ if TYPE_CHECKING:
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"PlayWrightBrowserToolkit": "langchain_community.agent_toolkits.playwright.toolkit"
"PlayWrightBrowserToolkit": "langchain_community.agent_toolkits.playwright.toolkit",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)

@ -9,7 +9,7 @@ if TYPE_CHECKING:
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"create_pbi_agent": "langchain_community.agent_toolkits.powerbi.base"
"create_pbi_agent": "langchain_community.agent_toolkits.powerbi.base",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)

@ -11,7 +11,7 @@ if TYPE_CHECKING:
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"create_pbi_chat_agent": "langchain_community.agent_toolkits.powerbi.chat_base"
"create_pbi_chat_agent": "langchain_community.agent_toolkits.powerbi.chat_base",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)

@ -9,7 +9,7 @@ if TYPE_CHECKING:
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"PowerBIToolkit": "langchain_community.agent_toolkits.powerbi.toolkit"
"PowerBIToolkit": "langchain_community.agent_toolkits.powerbi.toolkit",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)

@ -9,7 +9,7 @@ if TYPE_CHECKING:
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"create_spark_sql_agent": "langchain_community.agent_toolkits.spark_sql.base"
"create_spark_sql_agent": "langchain_community.agent_toolkits.spark_sql.base",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)

@ -9,7 +9,7 @@ if TYPE_CHECKING:
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SparkSQLToolkit": "langchain_community.agent_toolkits.spark_sql.toolkit"
"SparkSQLToolkit": "langchain_community.agent_toolkits.spark_sql.toolkit",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)

@ -9,7 +9,7 @@ if TYPE_CHECKING:
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SQLDatabaseToolkit": "langchain_community.agent_toolkits.sql.toolkit"
"SQLDatabaseToolkit": "langchain_community.agent_toolkits.sql.toolkit",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)

@ -1,13 +1,11 @@
# flake8: noqa

PREFIX = """You are an agent designed to answer questions about sets of documents.
You have access to tools for interacting with the documents, and the inputs to the tools are questions.
Sometimes, you will be asked to provide sources for your questions, in which case you should use the appropriate tool to do so.
If the question does not seem relevant to any of the tools provided, just return "I don't know" as the answer.
"""
""" # noqa: E501

ROUTER_PREFIX = """You are an agent designed to answer questions.
You have access to tools for interacting with different sources, and the inputs to the tools are questions.
Your main task is to decide which of the tools is relevant for answering question at hand.
For complex questions, you can break the question down into sub questions and use tools to answers the sub questions.
"""
""" # noqa: E501
@ -40,7 +40,8 @@ class VectorStoreToolkit(BaseToolkit):
msg = "You need to install langchain-community to use this toolkit."
raise ImportError(msg)
description = VectorStoreQATool.get_description(
self.vectorstore_info.name, self.vectorstore_info.description
self.vectorstore_info.name,
self.vectorstore_info.description,
)
qa_tool = VectorStoreQATool(
name=self.vectorstore_info.name,

@ -49,7 +50,8 @@ class VectorStoreToolkit(BaseToolkit):
llm=self.llm,
)
description = VectorStoreQAWithSourcesTool.get_description(
self.vectorstore_info.name, self.vectorstore_info.description
self.vectorstore_info.name,
self.vectorstore_info.description,
)
qa_with_sources_tool = VectorStoreQAWithSourcesTool(
name=f"{self.vectorstore_info.name}_with_sources",

@ -82,7 +84,8 @@ class VectorStoreRouterToolkit(BaseToolkit):
raise ImportError(msg)
for vectorstore_info in self.vectorstores:
description = VectorStoreQATool.get_description(
vectorstore_info.name, vectorstore_info.description
vectorstore_info.name,
vectorstore_info.description,
)
qa_tool = VectorStoreQATool(
name=vectorstore_info.name,

@ -9,7 +9,7 @@ if TYPE_CHECKING:
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ZapierToolkit": "langchain_community.agent_toolkits.zapier.toolkit"
"ZapierToolkit": "langchain_community.agent_toolkits.zapier.toolkit",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)

@ -49,7 +49,8 @@ class ChatAgent(Agent):
return "Thought:"

def _construct_scratchpad(
self, intermediate_steps: list[tuple[AgentAction, str]]
self,
intermediate_steps: list[tuple[AgentAction, str]],
) -> str:
agent_scratchpad = super()._construct_scratchpad(intermediate_steps)
if not isinstance(agent_scratchpad, str):

@ -106,14 +107,7 @@ class ChatAgent(Agent):
tool_strings = "\n".join([f"{tool.name}: {tool.description}" for tool in tools])
tool_names = ", ".join([tool.name for tool in tools])
format_instructions = format_instructions.format(tool_names=tool_names)
template = "\n\n".join(
[
system_message_prefix,
tool_strings,
format_instructions,
system_message_suffix,
]
)
template = f"{system_message_prefix}\n\n{tool_strings}\n\n{format_instructions}\n\n{system_message_suffix}" # noqa: E501
messages = [
SystemMessagePromptTemplate.from_template(template),
HumanMessagePromptTemplate.from_template(human_message),
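Several prompt builders in this commit swap a `"\n\n".join([...])` over a fixed list of names for a single f-string; this is ruff's static-join rewrite (FLY002). A minimal before/after sketch with hypothetical values:

prefix = "You are an agent."
tool_strings = "search: look things up"
format_instructions = "Use the tools."
suffix = "Begin!"

# Before: a join over a fixed list of variables.
template = "\n\n".join([prefix, tool_strings, format_instructions, suffix])

# After (FLY002): the f-string is equivalent and avoids the throwaway list.
template = f"{prefix}\n\n{tool_strings}\n\n{format_instructions}\n\n{suffix}"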
@ -57,7 +57,9 @@ class ChatOutputParser(AgentOutputParser):
)
raise OutputParserException(msg)
return AgentAction(
response["action"], response.get("action_input", {}), text
response["action"],
response.get("action_input", {}),
text,
)

except Exception as exc:

@ -1,5 +1,4 @@
# flake8: noqa
SYSTEM_MESSAGE_PREFIX = """Answer the following questions as best you can. You have access to the following tools:"""
SYSTEM_MESSAGE_PREFIX = """Answer the following questions as best you can. You have access to the following tools:""" # noqa: E501
FORMAT_INSTRUCTIONS = """The way you use the tools is by specifying a json blob.
Specifically, this json should have a `action` key (with the name of the tool to use) and a `action_input` key (with the input to the tool going here).

@ -25,6 +24,6 @@ $JSON_BLOB
Observation: the result of the action
... (this Thought/Action/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question"""
SYSTEM_MESSAGE_SUFFIX = """Begin! Reminder to always use the exact characters `Final Answer` when responding."""
Final Answer: the final answer to the original input question""" # noqa: E501
SYSTEM_MESSAGE_SUFFIX = """Begin! Reminder to always use the exact characters `Final Answer` when responding.""" # noqa: E501
HUMAN_MESSAGE = "{input}\n\n{agent_scratchpad}"

@ -36,7 +36,9 @@ class ConversationalAgent(Agent):

@classmethod
def _get_default_output_parser(
cls, ai_prefix: str = "AI", **kwargs: Any
cls,
ai_prefix: str = "AI",
**kwargs: Any,
) -> AgentOutputParser:
return ConvoOutputParser(ai_prefix=ai_prefix)

@ -93,13 +95,15 @@ class ConversationalAgent(Agent):
A PromptTemplate with the template assembled from the pieces here.
"""
tool_strings = "\n".join(
[f"> {tool.name}: {tool.description}" for tool in tools]
[f"> {tool.name}: {tool.description}" for tool in tools],
)
tool_names = ", ".join([tool.name for tool in tools])
format_instructions = format_instructions.format(
tool_names=tool_names, ai_prefix=ai_prefix, human_prefix=human_prefix
tool_names=tool_names,
ai_prefix=ai_prefix,
human_prefix=human_prefix,
)
template = "\n\n".join([prefix, tool_strings, format_instructions, suffix])
template = f"{prefix}\n\n{tool_strings}\n\n{format_instructions}\n\n{suffix}"
if input_variables is None:
input_variables = ["input", "chat_history", "agent_scratchpad"]
return PromptTemplate(template=template, input_variables=input_variables)

@ -161,7 +165,7 @@ class ConversationalAgent(Agent):
)
tool_names = [tool.name for tool in tools]
_output_parser = output_parser or cls._get_default_output_parser(
ai_prefix=ai_prefix
ai_prefix=ai_prefix,
)
return cls(
llm_chain=llm_chain,

@ -34,7 +34,8 @@ class ConvoOutputParser(AgentOutputParser):

if f"{self.ai_prefix}:" in text:
return AgentFinish(
{"output": text.split(f"{self.ai_prefix}:")[-1].strip()}, text
{"output": text.split(f"{self.ai_prefix}:")[-1].strip()},
text,
)
regex = r"Action: (.*?)[\n]*Action Input: ([\s\S]*)"
match = re.search(regex, text, re.DOTALL)

@ -1,4 +1,3 @@
# flake8: noqa
PREFIX = """Assistant is a large language model trained by OpenAI.

Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.

@ -10,7 +9,7 @@ Overall, Assistant is a powerful tool that can help with a wide range of tasks a
TOOLS:
------

Assistant has access to the following tools:"""
Assistant has access to the following tools:""" # noqa: E501
FORMAT_INSTRUCTIONS = """To use a tool, please use the following format:

```

@ -25,7 +24,7 @@ When you have a response to say to the Human, or if you do not need to use a too
```
Thought: Do I need to use a tool? No
{ai_prefix}: [your response here]
```"""
```""" # noqa: E501

SUFFIX = """Begin!

@ -96,15 +96,16 @@ class ConversationalChatAgent(Agent):
A PromptTemplate.
"""
tool_strings = "\n".join(
[f"> {tool.name}: {tool.description}" for tool in tools]
[f"> {tool.name}: {tool.description}" for tool in tools],
)
tool_names = ", ".join([tool.name for tool in tools])
_output_parser = output_parser or cls._get_default_output_parser()
format_instructions = human_message.format(
format_instructions=_output_parser.get_format_instructions()
format_instructions=_output_parser.get_format_instructions(),
)
final_prompt = format_instructions.format(
tool_names=tool_names, tools=tool_strings
tool_names=tool_names,
tools=tool_strings,
)
if input_variables is None:
input_variables = ["input", "chat_history", "agent_scratchpad"]

@ -117,14 +118,15 @@ class ConversationalChatAgent(Agent):
return ChatPromptTemplate(input_variables=input_variables, messages=messages)

def _construct_scratchpad(
self, intermediate_steps: list[tuple[AgentAction, str]]
self,
intermediate_steps: list[tuple[AgentAction, str]],
) -> list[BaseMessage]:
"""Construct the scratchpad that lets the agent continue its thought process."""
thoughts: list[BaseMessage] = []
for action, observation in intermediate_steps:
thoughts.append(AIMessage(content=action.log))
human_message = HumanMessage(
content=self.template_tool_response.format(observation=observation)
content=self.template_tool_response.format(observation=observation),
)
thoughts.append(human_message)
return thoughts

@ -1,11 +1,10 @@
# flake8: noqa
PREFIX = """Assistant is a large language model trained by OpenAI.

Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.

Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.

Overall, Assistant is a powerful system that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist."""
Overall, Assistant is a powerful system that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.""" # noqa: E501

FORMAT_INSTRUCTIONS = """RESPONSE FORMAT INSTRUCTIONS
----------------------------

@ -31,7 +30,7 @@ Use this if you want to respond directly to the human. Markdown code snippet for
"action": "Final Answer",
"action_input": string \\\\ You should put what you want to return to use here
}}}}
```"""
```""" # noqa: E501

SUFFIX = """TOOLS
------

@ -45,13 +44,13 @@ USER'S INPUT
--------------------
Here is the user's input (remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else):

{{{{input}}}}"""
{{{{input}}}}""" # noqa: E501

TEMPLATE_TOOL_RESPONSE = """TOOL RESPONSE: 
TEMPLATE_TOOL_RESPONSE = """TOOL RESPONSE:
---------------------
{observation}

USER'S INPUT
--------------------

Okay, so what is the response to my last comment? If using information obtained from the tools you must mention it explicitly without mentioning the tool names - I have forgotten all TOOL RESPONSES! Remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else."""
Okay, so what is the response to my last comment? If using information obtained from the tools you must mention it explicitly without mentioning the tool names - I have forgotten all TOOL RESPONSES! Remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else.""" # noqa: E501

@ -20,7 +20,7 @@ def format_log_to_messages(
for action, observation in intermediate_steps:
thoughts.append(AIMessage(content=action.log))
human_message = HumanMessage(
content=template_tool_response.format(observation=observation)
content=template_tool_response.format(observation=observation),
)
thoughts.append(human_message)
return thoughts
@ -6,7 +6,8 @@ from langchain_core.messages import AIMessage, BaseMessage, FunctionMessage


def _convert_agent_action_to_messages(
agent_action: AgentAction, observation: str
agent_action: AgentAction,
observation: str,
) -> list[BaseMessage]:
"""Convert an agent action to a message.

@ -28,7 +29,8 @@ def _convert_agent_action_to_messages(


def _create_function_message(
agent_action: AgentAction, observation: str
agent_action: AgentAction,
observation: str,
) -> FunctionMessage:
"""Convert agent action and observation into a function message.
Args:

@ -12,7 +12,8 @@ from langchain.agents.output_parsers.tools import ToolAgentAction


def _create_tool_message(
agent_action: ToolAgentAction, observation: str
agent_action: ToolAgentAction,
observation: str,
) -> ToolMessage:
"""Convert agent action and observation into a tool message.

@ -76,11 +76,17 @@ def initialize_agent(
agent_cls = AGENT_TO_CLASS[agent]
agent_kwargs = agent_kwargs or {}
agent_obj = agent_cls.from_llm_and_tools(
llm, tools, callback_manager=callback_manager, **agent_kwargs
llm,
tools,
callback_manager=callback_manager,
**agent_kwargs,
)
elif agent_path is not None:
agent_obj = load_agent(
agent_path, llm=llm, tools=tools, callback_manager=callback_manager
agent_path,
llm=llm,
tools=tools,
callback_manager=callback_manager,
)
with contextlib.suppress(NotImplementedError):
# TODO: Add tags from the serialized object directly.

@ -162,7 +162,7 @@ def create_json_chat_agent(
)
""" # noqa: E501
missing_vars = {"tools", "tool_names", "agent_scratchpad"}.difference(
prompt.input_variables + list(prompt.partial_variables)
prompt.input_variables + list(prompt.partial_variables),
)
if missing_vars:
msg = f"Prompt missing required variables: {missing_vars}"

@ -185,8 +185,9 @@ def create_json_chat_agent(
return (
RunnablePassthrough.assign(
agent_scratchpad=lambda x: format_log_to_messages(
x["intermediate_steps"], template_tool_response=template_tool_response
)
x["intermediate_steps"],
template_tool_response=template_tool_response,
),
)
| prompt
| llm_to_use

@ -1,9 +1,8 @@
# flake8: noqa
TEMPLATE_TOOL_RESPONSE = """TOOL RESPONSE: 
TEMPLATE_TOOL_RESPONSE = """TOOL RESPONSE:
---------------------
{observation}

USER'S INPUT
--------------------

Okay, so what is the response to my last comment? If using information obtained from the tools you must mention it explicitly without mentioning the tool names - I have forgotten all TOOL RESPONSES! Remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else - even if you just want to respond to the user. Do NOT respond with anything except a JSON snippet no matter what!"""
Okay, so what is the response to my last comment? If using information obtained from the tools you must mention it explicitly without mentioning the tool names - I have forgotten all TOOL RESPONSES! Remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else - even if you just want to respond to the user. Do NOT respond with anything except a JSON snippet no matter what!""" # noqa: E501

@ -3,7 +3,8 @@ from typing import Any
from langchain._api import create_importer

_importer = create_importer(
__package__, fallback_module="langchain_community.agent_toolkits.load_tools"
__package__,
fallback_module="langchain_community.agent_toolkits.load_tools",
)


@ -20,7 +20,10 @@ URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/age


def _load_agent_from_tools(
config: dict, llm: BaseLanguageModel, tools: list[Tool], **kwargs: Any
config: dict,
llm: BaseLanguageModel,
tools: list[Tool],
**kwargs: Any,
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
config_type = config.pop("_type")
if config_type not in AGENT_TO_CLASS:

@ -87,7 +90,7 @@ def load_agent_from_config(
if "output_parser" in config:
logger.warning(
"Currently loading output parsers on agent is not supported, "
"will just use the default one."
"will just use the default one.",
)
del config["output_parser"]

@ -97,7 +100,8 @@ def load_agent_from_config(

@deprecated("0.1.0", removal="1.0")
def load_agent(
path: Union[str, Path], **kwargs: Any
path: Union[str, Path],
**kwargs: Any,
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Unified method for loading an agent from LangChainHub or local fs.

@ -123,7 +127,8 @@ def load_agent(


def _load_agent_from_file(
file: Union[str, Path], **kwargs: Any
file: Union[str, Path],
**kwargs: Any,
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Load agent from file."""
valid_suffixes = {"json", "yaml"}

@ -104,7 +104,7 @@ class ZeroShotAgent(Agent):
tool_strings = render_text_description(list(tools))
tool_names = ", ".join([tool.name for tool in tools])
format_instructions = format_instructions.format(tool_names=tool_names)
template = "\n\n".join([prefix, tool_strings, format_instructions, suffix])
template = f"{prefix}\n\n{tool_strings}\n\n{format_instructions}\n\n{suffix}"
if input_variables:
return PromptTemplate(template=template, input_variables=input_variables)
return PromptTemplate.from_template(template)

@ -186,7 +186,10 @@ class MRKLChain(AgentExecutor):

@classmethod
def from_chains(
cls, llm: BaseLanguageModel, chains: list[ChainConfig], **kwargs: Any
cls,
llm: BaseLanguageModel,
chains: list[ChainConfig],
**kwargs: Any,
) -> AgentExecutor:
"""User-friendly way to initialize the MRKL chain.

@ -53,7 +53,8 @@ class MRKLOutputParser(AgentOutputParser):
start_index = text.find(FINAL_ANSWER_ACTION) + len(FINAL_ANSWER_ACTION)
end_index = text.find("\n\n", start_index)
return AgentFinish(
{"output": text[start_index:end_index].strip()}, text[:end_index]
{"output": text[start_index:end_index].strip()},
text[:end_index],
)
msg = f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}"
raise OutputParserException(msg)

@ -70,7 +71,8 @@ class MRKLOutputParser(AgentOutputParser):

if includes_answer:
return AgentFinish(
{"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text
{"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()},
text,
)

if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):

@ -82,7 +84,9 @@ class MRKLOutputParser(AgentOutputParser):
send_to_llm=True,
)
if not re.search(
r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL
r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)",
text,
re.DOTALL,
):
msg = f"Could not parse LLM output: `{text}`"
raise OutputParserException(

@ -1,5 +1,4 @@
# flake8: noqa
PREFIX = """Answer the following questions as best you can. You have access to the following tools:"""
PREFIX = """Answer the following questions as best you can. You have access to the following tools:""" # noqa: E501
FORMAT_INSTRUCTIONS = """Use the following format:

Question: the input question you must answer
@ -273,7 +273,10 @@ class OpenAIAssistantRunnable(RunnableSerializable[dict, OutputType]):

@override
def invoke(
self, input: dict, config: Optional[RunnableConfig] = None, **kwargs: Any
self,
input: dict,
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> OutputType:
"""Invoke assistant.

@ -314,13 +317,15 @@ class OpenAIAssistantRunnable(RunnableSerializable[dict, OutputType]):
inheritable_metadata=config.get("metadata"),
)
run_manager = callback_manager.on_chain_start(
dumpd(self), input, name=config.get("run_name") or self.get_name()
dumpd(self),
input,
name=config.get("run_name") or self.get_name(),
)
try:
# Being run within AgentExecutor and there are tool outputs to submit.
if self.as_agent and input.get("intermediate_steps"):
tool_outputs = self._parse_intermediate_steps(
input["intermediate_steps"]
input["intermediate_steps"],
)
run = self.client.beta.threads.runs.submit_tool_outputs(**tool_outputs)
# Starting a new thread and a new run.

@ -332,7 +337,7 @@ class OpenAIAssistantRunnable(RunnableSerializable[dict, OutputType]):
"content": input["content"],
"metadata": input.get("message_metadata"),
"attachments": input.get("attachments"),
}
},
],
"metadata": input.get("thread_metadata"),
}

@ -401,7 +406,10 @@ class OpenAIAssistantRunnable(RunnableSerializable[dict, OutputType]):

@override
async def ainvoke(
self, input: dict, config: Optional[RunnableConfig] = None, **kwargs: Any
self,
input: dict,
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> OutputType:
"""Async invoke assistant.

@ -442,16 +450,18 @@ class OpenAIAssistantRunnable(RunnableSerializable[dict, OutputType]):
inheritable_metadata=config.get("metadata"),
)
run_manager = callback_manager.on_chain_start(
dumpd(self), input, name=config.get("run_name") or self.get_name()
dumpd(self),
input,
name=config.get("run_name") or self.get_name(),
)
try:
# Being run within AgentExecutor and there are tool outputs to submit.
if self.as_agent and input.get("intermediate_steps"):
tool_outputs = await self._aparse_intermediate_steps(
input["intermediate_steps"]
input["intermediate_steps"],
)
run = await self.async_client.beta.threads.runs.submit_tool_outputs(
**tool_outputs
**tool_outputs,
)
# Starting a new thread and a new run.
elif "thread_id" not in input:

@ -461,7 +471,7 @@ class OpenAIAssistantRunnable(RunnableSerializable[dict, OutputType]):
"role": "user",
"content": input["content"],
"metadata": input.get("message_metadata"),
}
},
],
"metadata": input.get("thread_metadata"),
}

@ -479,7 +489,7 @@ class OpenAIAssistantRunnable(RunnableSerializable[dict, OutputType]):
# framework.
else:
run = await self.async_client.beta.threads.runs.submit_tool_outputs(
**input
**input,
)
run = await self._await_for_run(run.id, run.thread_id)
except BaseException as e:

@ -495,7 +505,8 @@ class OpenAIAssistantRunnable(RunnableSerializable[dict, OutputType]):
return response

def _parse_intermediate_steps(
self, intermediate_steps: list[tuple[OpenAIAssistantAction, str]]
self,
intermediate_steps: list[tuple[OpenAIAssistantAction, str]],
) -> dict:
last_action, last_output = intermediate_steps[-1]
run = self._wait_for_run(last_action.run_id, last_action.thread_id)

@ -575,7 +586,8 @@ class OpenAIAssistantRunnable(RunnableSerializable[dict, OutputType]):
)

messages = self.client.beta.threads.messages.list(
run.thread_id, order="asc"
run.thread_id,
order="asc",
)
new_messages = [msg for msg in messages if msg.run_id == run.id]
if not self.as_agent:

@ -633,7 +645,7 @@ class OpenAIAssistantRunnable(RunnableSerializable[dict, OutputType]):
log="",
run_id=run.id,
thread_id=run.thread_id,
)
),
)
return actions
run_info = json.dumps(run.dict(), indent=2)

@ -650,7 +662,8 @@ class OpenAIAssistantRunnable(RunnableSerializable[dict, OutputType]):
return run

async def _aparse_intermediate_steps(
self, intermediate_steps: list[tuple[OpenAIAssistantAction, str]]
self,
intermediate_steps: list[tuple[OpenAIAssistantAction, str]],
) -> dict:
last_action, last_output = intermediate_steps[-1]
run = self._wait_for_run(last_action.run_id, last_action.thread_id)

@ -730,7 +743,8 @@ class OpenAIAssistantRunnable(RunnableSerializable[dict, OutputType]):
)

messages = await self.async_client.beta.threads.messages.list(
run.thread_id, order="asc"
run.thread_id,
order="asc",
)
new_messages = [msg for msg in messages if msg.run_id == run.id]
if not self.as_agent:

@ -784,7 +798,7 @@ class OpenAIAssistantRunnable(RunnableSerializable[dict, OutputType]):
log="",
run_id=run.id,
thread_id=run.thread_id,
)
),
)
return actions
run_info = json.dumps(run.dict(), indent=2)

@ -795,7 +809,8 @@ class OpenAIAssistantRunnable(RunnableSerializable[dict, OutputType]):
in_progress = True
while in_progress:
run = await self.async_client.beta.threads.runs.retrieve(
run_id, thread_id=thread_id
run_id,
thread_id=thread_id,
)
in_progress = run.status in ("in_progress", "queued")
if in_progress:
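For context, the retrieve call above sits inside a simple polling loop: the run is re-fetched until it leaves the queued/in-progress states. A minimal synchronous sketch of the same shape, with a hypothetical fetch_status callable standing in for the OpenAI client:

import time
from typing import Callable

def wait_for_run(fetch_status: Callable[[], str], check_every_ms: int = 1000) -> str:
    # Poll until the run settles; mirrors the in_progress loop above.
    status = fetch_status()
    while status in ("in_progress", "queued"):
        time.sleep(check_every_ms / 1000)
        status = fetch_status()
    return status

statuses = iter(["queued", "in_progress", "completed"])
assert wait_for_run(lambda: next(statuses), check_every_ms=1) == "completed"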
@ -161,7 +161,9 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
prompt = self.prompt.format_prompt(**full_inputs)
messages = prompt.to_messages()
predicted_message = await self.llm.apredict_messages(
messages, functions=self.functions, callbacks=callbacks
messages,
functions=self.functions,
callbacks=callbacks,
)
return self.output_parser._parse_ai_message(predicted_message)

@ -188,12 +190,15 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
if early_stopping_method == "force":
# `force` just returns a constant string
return AgentFinish(
{"output": "Agent stopped due to iteration limit or time limit."}, ""
{"output": "Agent stopped due to iteration limit or time limit."},
"",
)
if early_stopping_method == "generate":
# Generate does one final forward pass
agent_decision = self.plan(
intermediate_steps, with_functions=False, **kwargs
intermediate_steps,
with_functions=False,
**kwargs,
)
if isinstance(agent_decision, AgentFinish):
return agent_decision

@ -209,7 +214,7 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
def create_prompt(
cls,
system_message: Optional[SystemMessage] = SystemMessage(
content="You are a helpful AI assistant."
content="You are a helpful AI assistant.",
),
extra_prompt_messages: Optional[list[BaseMessagePromptTemplate]] = None,
) -> ChatPromptTemplate:

@ -233,7 +238,7 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
*_prompts,
HumanMessagePromptTemplate.from_template("{input}"),
MessagesPlaceholder(variable_name="agent_scratchpad"),
]
],
)
return ChatPromptTemplate(messages=messages)

@ -245,7 +250,7 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
callback_manager: Optional[BaseCallbackManager] = None,
extra_prompt_messages: Optional[list[BaseMessagePromptTemplate]] = None,
system_message: Optional[SystemMessage] = SystemMessage(
content="You are a helpful AI assistant."
content="You are a helpful AI assistant.",
),
**kwargs: Any,
) -> BaseSingleActionAgent:

@ -274,7 +279,9 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):


def create_openai_functions_agent(
llm: BaseLanguageModel, tools: Sequence[BaseTool], prompt: ChatPromptTemplate
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
prompt: ChatPromptTemplate,
) -> Runnable:
"""Create an agent that uses OpenAI function calling.

@ -357,8 +364,8 @@ def create_openai_functions_agent(
return (
RunnablePassthrough.assign(
agent_scratchpad=lambda x: format_to_openai_function_messages(
x["intermediate_steps"]
)
x["intermediate_steps"],
),
)
| prompt
| llm_with_tools

@ -95,7 +95,8 @@ def _parse_ai_message(message: BaseMessage) -> Union[list[AgentAction], AgentFin
return final_tools

return AgentFinish(
return_values={"output": message.content}, log=str(message.content)
return_values={"output": message.content},
log=str(message.content),
)


@ -190,7 +191,7 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
},
"required": ["action_name", "action"],
},
}
},
},
"required": ["actions"],
},

@ -222,7 +223,9 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
prompt = self.prompt.format_prompt(**full_inputs)
messages = prompt.to_messages()
predicted_message = self.llm.predict_messages(
messages, functions=self.functions, callbacks=callbacks
messages,
functions=self.functions,
callbacks=callbacks,
)
return _parse_ai_message(predicted_message)

@ -251,7 +254,9 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
prompt = self.prompt.format_prompt(**full_inputs)
messages = prompt.to_messages()
predicted_message = await self.llm.apredict_messages(
messages, functions=self.functions, callbacks=callbacks
messages,
functions=self.functions,
callbacks=callbacks,
)
return _parse_ai_message(predicted_message)

@ -259,7 +264,7 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
def create_prompt(
cls,
system_message: Optional[SystemMessage] = SystemMessage(
content="You are a helpful AI assistant."
content="You are a helpful AI assistant.",
),
extra_prompt_messages: Optional[list[BaseMessagePromptTemplate]] = None,
) -> BasePromptTemplate:

@ -283,7 +288,7 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
*_prompts,
HumanMessagePromptTemplate.from_template("{input}"),
MessagesPlaceholder(variable_name="agent_scratchpad"),
]
],
)
return ChatPromptTemplate(messages=messages)

@ -295,7 +300,7 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
callback_manager: Optional[BaseCallbackManager] = None,
extra_prompt_messages: Optional[list[BaseMessagePromptTemplate]] = None,
system_message: Optional[SystemMessage] = SystemMessage(
content="You are a helpful AI assistant."
content="You are a helpful AI assistant.",
),
**kwargs: Any,
) -> BaseMultiActionAgent:

@ -86,21 +86,21 @@ def create_openai_tools_agent(
)
"""
missing_vars = {"agent_scratchpad"}.difference(
prompt.input_variables + list(prompt.partial_variables)
prompt.input_variables + list(prompt.partial_variables),
)
if missing_vars:
msg = f"Prompt missing required variables: {missing_vars}"
raise ValueError(msg)

llm_with_tools = llm.bind(
tools=[convert_to_openai_tool(tool, strict=strict) for tool in tools]
tools=[convert_to_openai_tool(tool, strict=strict) for tool in tools],
)

return (
RunnablePassthrough.assign(
agent_scratchpad=lambda x: format_to_openai_tool_messages(
x["intermediate_steps"]
)
x["intermediate_steps"],
),
)
| prompt
| llm_with_tools

@ -75,11 +75,15 @@ class OpenAIFunctionsAgentOutputParser(AgentOutputParser):
)

return AgentFinish(
return_values={"output": message.content}, log=str(message.content)
return_values={"output": message.content},
log=str(message.content),
)

def parse_result(
self, result: list[Generation], *, partial: bool = False
self,
result: list[Generation],
*,
partial: bool = False,
) -> Union[AgentAction, AgentFinish]:
if not isinstance(result[0], ChatGeneration):
msg = "This output parser only works on ChatGeneration output"
@ -30,7 +30,7 @@ def parse_ai_message_to_openai_tool_action(
log=action.log,
message_log=action.message_log,
tool_call_id=action.tool_call_id,
)
),
)
else:
final_actions.append(action)

@ -54,7 +54,10 @@ class OpenAIToolsAgentOutputParser(MultiActionAgentOutputParser):
return "openai-tools-agent-output-parser"

def parse_result(
self, result: list[Generation], *, partial: bool = False
self,
result: list[Generation],
*,
partial: bool = False,
) -> Union[list[AgentAction], AgentFinish]:
if not isinstance(result[0], ChatGeneration):
msg = "This output parser only works on ChatGeneration output"

@ -67,7 +67,9 @@ class ReActJsonSingleInputOutputParser(AgentOutputParser):
)
raise OutputParserException(msg)
return AgentAction(
response["action"], response.get("action_input", {}), text
response["action"],
response.get("action_input", {}),
text,
)

except Exception:

@ -67,7 +67,8 @@ class ReActSingleInputOutputParser(AgentOutputParser):

if includes_answer:
return AgentFinish(
{"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text
{"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()},
text,
)

if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):

@ -79,7 +80,9 @@ class ReActSingleInputOutputParser(AgentOutputParser):
send_to_llm=True,
)
if not re.search(
r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL
r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)",
text,
re.DOTALL,
):
msg = f"Could not parse LLM output: `{text}`"
raise OutputParserException(

@ -33,7 +33,8 @@ def parse_ai_message_to_tool_action(
else:
if not message.additional_kwargs.get("tool_calls"):
return AgentFinish(
return_values={"output": message.content}, log=str(message.content)
return_values={"output": message.content},
log=str(message.content),
)
# Best-effort parsing
tool_calls = []

@ -43,7 +44,7 @@ def parse_ai_message_to_tool_action(
try:
args = json.loads(function["arguments"] or "{}")
tool_calls.append(
ToolCall(name=function_name, args=args, id=tool_call["id"])
ToolCall(name=function_name, args=args, id=tool_call["id"]),
)
except JSONDecodeError:
msg = (

@ -71,7 +72,7 @@ def parse_ai_message_to_tool_action(
log=log,
message_log=[message],
tool_call_id=tool_call["id"],
)
),
)
return actions

@ -90,7 +91,10 @@ class ToolsAgentOutputParser(MultiActionAgentOutputParser):
return "tools-agent-output-parser"

def parse_result(
self, result: list[Generation], *, partial: bool = False
self,
result: list[Generation],
*,
partial: bool = False,
) -> Union[list[AgentAction], AgentFinish]:
if not isinstance(result[0], ChatGeneration):
msg = "This output parser only works on ChatGeneration output"

@ -118,7 +118,7 @@ def create_react_agent(
prompt = PromptTemplate.from_template(template)
""" # noqa: E501
missing_vars = {"tools", "tool_names", "agent_scratchpad"}.difference(
prompt.input_variables + list(prompt.partial_variables)
prompt.input_variables + list(prompt.partial_variables),
)
if missing_vars:
msg = f"Prompt missing required variables: {missing_vars}"

@ -1,4 +1,3 @@
# flake8: noqa
from langchain_core.prompts.prompt import PromptTemplate

EXAMPLES = [

@ -1,4 +1,3 @@
# flake8: noqa
from langchain_core.prompts.prompt import PromptTemplate

EXAMPLES = [

@ -16,7 +15,7 @@ Thought: I need to instead search High Plains (United States).
Action: Search[High Plains (United States)]
Observation: The High Plains are a subregion of the Great Plains. From east to west, the High Plains rise in elevation from around 1,800 to 7,000 ft (550 to 2,130 m).[3]
Thought: High Plains rise in elevation from around 1,800 to 7,000 ft, so the answer is 1,800 to 7,000 ft.
Action: Finish[1,800 to 7,000 ft]""",
Action: Finish[1,800 to 7,000 ft]""", # noqa: E501
"""Question: Musician and satirist Allie Goertz wrote a song about the "The Simpsons" character Milhouse, who Matt Groening named after who?
Thought: The question simplifies to "The Simpsons" character Milhouse is named after who. I only need to search Milhouse and find who it is named after.
Action: Search[Milhouse]

@ -25,16 +24,16 @@ Thought: The paragraph does not tell who Milhouse is named after, maybe I can lo
Action: Lookup[named after]
Observation: (Result 1 / 1) Milhouse was named after U.S. president Richard Nixon, whose middle name was Milhous.
Thought: Milhouse was named after U.S. president Richard Nixon, so the answer is Richard Nixon.
Action: Finish[Richard Nixon]""",
Action: Finish[Richard Nixon]""", # noqa: E501
"""Question: Which documentary is about Finnish rock groups, Adam Clayton Powell or The Saimaa Gesture?
Thought: I need to search Adam Clayton Powell and The Saimaa Gesture, and find which documentary is about Finnish rock groups.
Action: Search[Adam Clayton Powell]
Observation: Could not find [Adam Clayton Powell]. Similar: [’Adam Clayton Powell III’, ’Seventh Avenue (Manhattan)’, ’Adam Clayton Powell Jr. State Office Building’, ’Isabel Washington Powell’, ’Adam Powell’, ’Adam Clayton Powell (film)’, ’Giancarlo Esposito’].
Observation: Could not find [Adam Clayton Powell]. Similar: ['Adam Clayton Powell III', 'Seventh Avenue (Manhattan)', 'Adam Clayton Powell Jr. State Office Building', 'Isabel Washington Powell', 'Adam Powell', 'Adam Clayton Powell (film)', 'Giancarlo Esposito'].
Thought: To find the documentary, I can search Adam Clayton Powell (film).
Action: Search[Adam Clayton Powell (film)]
Observation: Adam Clayton Powell is a 1989 American documentary film directed by Richard Kilberg. The film is about the rise and fall of influential African-American politician Adam Clayton Powell Jr.[3][4] It was later aired as part of the PBS series The American Experience.
Thought: Adam Clayton Powell (film) is a documentary about an African-American politician, not Finnish rock groups. So the documentary about Finnish rock groups must instead be The Saimaa Gesture.
Action: Finish[The Saimaa Gesture]""",
Action: Finish[The Saimaa Gesture]""", # noqa: E501
"""Question: What profession does Nicholas Ray and Elia Kazan have in common?
Thought: I need to search Nicholas Ray and Elia Kazan, find their professions, then find the profession they have in common.
Action: Search[Nicholas Ray]

@ -43,16 +42,16 @@ Thought: Professions of Nicholas Ray are director, screenwriter, and actor. I ne
Action: Search[Elia Kazan]
Observation: Elia Kazan was an American film and theatre director, producer, screenwriter and actor.
Thought: Professions of Elia Kazan are director, producer, screenwriter, and actor. So profession Nicholas Ray and Elia Kazan have in common is director, screenwriter, and actor.
Action: Finish[director, screenwriter, actor]""",
"""Question: Which magazine was started first Arthur’s Magazine or First for Women?
Thought: I need to search Arthur’s Magazine and First for Women, and find which was started first.
Action: Search[Arthur’s Magazine]
Observation: Arthur’s Magazine (1844-1846) was an American literary periodical published in Philadelphia in the 19th century.
Thought: Arthur’s Magazine was started in 1844. I need to search First for Women next.
Action: Finish[director, screenwriter, actor]""", # noqa: E501
"""Question: Which magazine was started first Arthur's Magazine or First for Women?
Thought: I need to search Arthur's Magazine and First for Women, and find which was started first.
Action: Search[Arthur's Magazine]
Observation: Arthur's Magazine (1844-1846) was an American literary periodical published in Philadelphia in the 19th century.
Thought: Arthur's Magazine was started in 1844. I need to search First for Women next.
Action: Search[First for Women]
Observation: First for Women is a woman’s magazine published by Bauer Media Group in the USA.[1] The magazine was started in 1989.
Thought: First for Women was started in 1989. 1844 (Arthur’s Magazine) < 1989 (First for Women), so Arthur’s Magazine was started first.
Action: Finish[Arthur’s Magazine]""",
Observation: First for Women is a woman's magazine published by Bauer Media Group in the USA.[1] The magazine was started in 1989.
Thought: First for Women was started in 1989. 1844 (Arthur's Magazine) < 1989 (First for Women), so Arthur's Magazine was started first.
Action: Finish[Arthur's Magazine]""", # noqa: E501
"""Question: Were Pavel Urysohn and Leonid Levin known for the same type of work?
Thought: I need to search Pavel Urysohn and Leonid Levin, find their types of work, then find if they are the same.
Action: Search[Pavel Urysohn]

@ -61,7 +60,7 @@ Thought: Pavel Urysohn is a mathematician. I need to search Leonid Levin next an
Action: Search[Leonid Levin]
Observation: Leonid Anatolievich Levin is a Soviet-American mathematician and computer scientist.
Thought: Leonid Levin is a mathematician and computer scientist. So Pavel Urysohn and Leonid Levin have the same type of work.
Action: Finish[yes]""",
Action: Finish[yes]""", # noqa: E501
]
SUFFIX = """\nQuestion: {input}
{agent_scratchpad}"""
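The wiki prompt hunks above also swap typographic apostrophes (U+2019) for plain ASCII ones; this looks like ruff's ambiguous-unicode checks (the RUF001/RUF002 family), though the exact rule code is an assumption here. A minimal detection sketch:

CURLY_APOSTROPHE = "\u2019"  # easily confused with the ASCII apostrophe

def normalize_quotes(text: str) -> str:
    # Replace the typographic apostrophe with the ASCII one, the same
    # substitution the diff applies to the Arthur's Magazine example.
    return text.replace(CURLY_APOSTROPHE, "'")

assert normalize_quotes("Arthur\u2019s Magazine") == "Arthur's Magazine"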
|
||||
|
@ -12,7 +12,8 @@ class AgentScratchPadChatPromptTemplate(ChatPromptTemplate):
|
||||
return False
|
||||
|
||||
def _construct_agent_scratchpad(
|
||||
self, intermediate_steps: list[tuple[AgentAction, str]]
|
||||
self,
|
||||
intermediate_steps: list[tuple[AgentAction, str]],
|
||||
) -> str:
|
||||
if len(intermediate_steps) == 0:
|
||||
return ""
|
||||
@ -29,6 +30,6 @@ class AgentScratchPadChatPromptTemplate(ChatPromptTemplate):
|
||||
def _merge_partial_and_user_variables(self, **kwargs: Any) -> dict[str, Any]:
|
||||
intermediate_steps = kwargs.pop("intermediate_steps")
|
||||
kwargs["agent_scratchpad"] = self._construct_agent_scratchpad(
|
||||
intermediate_steps
|
||||
intermediate_steps,
|
||||
)
|
||||
return kwargs
|
||||
|
@ -76,7 +76,9 @@ class SelfAskWithSearchChain(AgentExecutor):
        self,
        llm: BaseLanguageModel,
        search_chain: Union[
            GoogleSerperAPIWrapper, SearchApiAPIWrapper, SerpAPIWrapper
            GoogleSerperAPIWrapper,
            SearchApiAPIWrapper,
            SerpAPIWrapper,
        ],
        **kwargs: Any,
    ):
@ -92,7 +94,9 @@ class SelfAskWithSearchChain(AgentExecutor):


def create_self_ask_with_search_agent(
    llm: BaseLanguageModel, tools: Sequence[BaseTool], prompt: BasePromptTemplate
    llm: BaseLanguageModel,
    tools: Sequence[BaseTool],
    prompt: BasePromptTemplate,
) -> Runnable:
    """Create an agent that uses self-ask with search prompting.

@ -180,7 +184,7 @@ def create_self_ask_with_search_agent(
    prompt = PromptTemplate.from_template(template)
    """  # noqa: E501
    missing_vars = {"agent_scratchpad"}.difference(
        prompt.input_variables + list(prompt.partial_variables)
        prompt.input_variables + list(prompt.partial_variables),
    )
    if missing_vars:
        msg = f"Prompt missing required variables: {missing_vars}"

@ -1,4 +1,3 @@
# flake8: noqa
from langchain_core.prompts.prompt import PromptTemplate

_DEFAULT_TEMPLATE = """Question: Who lived longer, Muhammad Ali or Alan Turing?

@ -35,7 +35,7 @@ class StructuredChatAgent(Agent):
    """Structured Chat Agent."""

    output_parser: AgentOutputParser = Field(
        default_factory=StructuredChatOutputParserWithRetries
        default_factory=StructuredChatOutputParserWithRetries,
    )
    """Output parser for the agent."""

@ -50,7 +50,8 @@ class StructuredChatAgent(Agent):
        return "Thought:"

    def _construct_scratchpad(
        self, intermediate_steps: list[tuple[AgentAction, str]]
        self,
        intermediate_steps: list[tuple[AgentAction, str]],
    ) -> str:
        agent_scratchpad = super()._construct_scratchpad(intermediate_steps)
        if not isinstance(agent_scratchpad, str):
@ -70,7 +71,9 @@ class StructuredChatAgent(Agent):

    @classmethod
    def _get_default_output_parser(
        cls, llm: Optional[BaseLanguageModel] = None, **kwargs: Any
        cls,
        llm: Optional[BaseLanguageModel] = None,
        **kwargs: Any,
    ) -> AgentOutputParser:
        return StructuredChatOutputParserWithRetries.from_llm(llm=llm)

@ -96,7 +99,7 @@ class StructuredChatAgent(Agent):
        formatted_tools = "\n".join(tool_strings)
        tool_names = ", ".join([tool.name for tool in tools])
        format_instructions = format_instructions.format(tool_names=tool_names)
        template = "\n\n".join([prefix, formatted_tools, format_instructions, suffix])
        template = f"{prefix}\n\n{formatted_tools}\n\n{format_instructions}\n\n{suffix}"
        if input_variables is None:
            input_variables = ["input", "agent_scratchpad"]
        _memory_prompts = memory_prompts or []
@ -275,7 +278,7 @@ def create_structured_chat_agent(
    )
    """  # noqa: E501
    missing_vars = {"tools", "tool_names", "agent_scratchpad"}.difference(
        prompt.input_variables + list(prompt.partial_variables)
        prompt.input_variables + list(prompt.partial_variables),
    )
    if missing_vars:
        msg = f"Prompt missing required variables: {missing_vars}"

@ -43,7 +43,9 @@ class StructuredChatOutputParser(AgentOutputParser):
                if response["action"] == "Final Answer":
                    return AgentFinish({"output": response["action_input"]}, text)
                return AgentAction(
                    response["action"], response.get("action_input", {}), text
                    response["action"],
                    response.get("action_input", {}),
                    text,
                )
            return AgentFinish({"output": text}, text)
        except Exception as e:
@ -88,7 +90,8 @@ class StructuredChatOutputParserWithRetries(AgentOutputParser):
        if llm is not None:
            base_parser = base_parser or StructuredChatOutputParser()
            output_fixing_parser: OutputFixingParser = OutputFixingParser.from_llm(
                llm=llm, parser=base_parser
                llm=llm,
                parser=base_parser,
            )
            return cls(output_fixing_parser=output_fixing_parser)
        if base_parser is not None:

@ -1,5 +1,4 @@
# flake8: noqa
PREFIX = """Respond to the human as helpfully and accurately as possible. You have access to the following tools:"""
PREFIX = """Respond to the human as helpfully and accurately as possible. You have access to the following tools:"""  # noqa: E501
FORMAT_INSTRUCTIONS = """Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).

Valid "action" values: "Final Answer" or {tool_names}
@ -30,6 +29,6 @@ Action:
  "action": "Final Answer",
  "action_input": "Final response to human"
}}}}
```"""
```"""  # noqa: E501
SUFFIX = """Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:.
Thought:"""
Thought:"""  # noqa: E501

@ -87,7 +87,7 @@ def create_tool_calling_agent(
        messages will be passed in here.
    """
    missing_vars = {"agent_scratchpad"}.difference(
        prompt.input_variables + list(prompt.partial_variables)
        prompt.input_variables + list(prompt.partial_variables),
    )
    if missing_vars:
        msg = f"Prompt missing required variables: {missing_vars}"
@ -102,7 +102,7 @@ def create_tool_calling_agent(

    return (
        RunnablePassthrough.assign(
            agent_scratchpad=lambda x: message_formatter(x["intermediate_steps"])
            agent_scratchpad=lambda x: message_formatter(x["intermediate_steps"]),
        )
        | prompt
        | llm_with_tools

@ -52,7 +52,7 @@ class XMLAgent(BaseSingleActionAgent):
    def get_default_prompt() -> ChatPromptTemplate:
        base_prompt = ChatPromptTemplate.from_template(agent_instructions)
        return base_prompt + AIMessagePromptTemplate.from_template(
            "{intermediate_steps}"
            "{intermediate_steps}",
        )

    @staticmethod
@ -205,7 +205,7 @@ def create_xml_agent(
    prompt = PromptTemplate.from_template(template)
    """  # noqa: E501
    missing_vars = {"tools", "agent_scratchpad"}.difference(
        prompt.input_variables + list(prompt.partial_variables)
        prompt.input_variables + list(prompt.partial_variables),
    )
    if missing_vars:
        msg = f"Prompt missing required variables: {missing_vars}"

@ -1,4 +1,3 @@
# flake8: noqa
# TODO: deprecate
agent_instructions = """You are a helpful assistant. Help the user answer any questions.

@ -19,4 +18,4 @@ When you are done, respond with a final answer between <final_answer></final_ans

Begin!

Question: {question}"""
Question: {question}"""  # noqa: E501

@ -9,7 +9,7 @@ if TYPE_CHECKING:
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    "ArgillaCallbackHandler": "langchain_community.callbacks.argilla_callback"
    "ArgillaCallbackHandler": "langchain_community.callbacks.argilla_callback",
}

_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)

@ -9,7 +9,7 @@ if TYPE_CHECKING:
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    "ArizeCallbackHandler": "langchain_community.callbacks.arize_callback"
    "ArizeCallbackHandler": "langchain_community.callbacks.arize_callback",
}

_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)

@ -9,7 +9,7 @@ if TYPE_CHECKING:
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    "ArthurCallbackHandler": "langchain_community.callbacks.arthur_callback"
    "ArthurCallbackHandler": "langchain_community.callbacks.arthur_callback",
}

_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)

@ -9,7 +9,7 @@ if TYPE_CHECKING:
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    "ClearMLCallbackHandler": "langchain_community.callbacks.clearml_callback"
    "ClearMLCallbackHandler": "langchain_community.callbacks.clearml_callback",
}

_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)

@ -9,7 +9,7 @@ if TYPE_CHECKING:
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    "CometCallbackHandler": "langchain_community.callbacks.comet_ml_callback"
    "CometCallbackHandler": "langchain_community.callbacks.comet_ml_callback",
}

_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)