experimental[major]: Force users to opt-in into code that relies on the python repl (#22860)

This should make it obvious that a few of the agents in langchain
experimental rely on the Python REPL as a tool under the hood, and will
force users to opt in.
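Concretely, affected entry points such as `PALChain.from_math_prompt` now refuse to construct unless the caller passes the new flag. A minimal before/after sketch (assuming `FakeListLLM` is importable from `langchain_core.language_models` and that the constructor forwards keyword arguments to the chain, which may differ across versions):

```python
from langchain_core.language_models import FakeListLLM
from langchain_experimental.pal_chain import PALChain

# Any LLM works here; a fake one keeps the example self-contained.
llm = FakeListLLM(responses=["def solution():\n    return 42"])

# Before this change, this silently constructed a chain that could
# execute model-generated Python:
#   chain = PALChain.from_math_prompt(llm)
# After this change, the same call raises a ValueError pointing at the
# security notice; the caller must opt in explicitly:
chain = PALChain.from_math_prompt(llm, allow_dangerous_code=True)
```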
Author: Eugene Yurtsev
Date: 2024-06-13 15:41:24 -04:00
Committed by: GitHub
Parent: 869523ad72
Commit: ce0b0f22a1
7 changed files with 168 additions and 13 deletions


@@ -18,7 +18,7 @@ from langchain_core.language_models import BaseLanguageModel
 from langchain_experimental.pal_chain.colored_object_prompt import COLORED_OBJECT_PROMPT
 from langchain_experimental.pal_chain.math_prompt import MATH_PROMPT
-from langchain_experimental.pydantic_v1 import Extra, Field
+from langchain_experimental.pydantic_v1 import Extra, Field, root_validator

 COMMAND_EXECUTION_FUNCTIONS = ["system", "exec", "execfile", "eval", "__import__"]
 COMMAND_EXECUTION_ATTRIBUTES = [
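The `COMMAND_EXECUTION_*` lists above feed the AST guardrail referenced in the security notice below: the chain parses the generated program and rejects calls to known command-execution functions. A rough, hypothetical sketch of that style of check, using only the standard library (not the library's exact `PALValidation` implementation):

```python
import ast

COMMAND_EXECUTION_FUNCTIONS = ["system", "exec", "execfile", "eval", "__import__"]


def calls_dangerous_function(code: str) -> bool:
    """Return True if the generated code calls a known command-execution function."""
    tree = ast.parse(code)
    for node in ast.walk(tree):
        if isinstance(node, ast.Call):
            func = node.func
            # Direct calls like eval(...) or __import__(...)
            if isinstance(func, ast.Name) and func.id in COMMAND_EXECUTION_FUNCTIONS:
                return True
            # Attribute calls like os.system(...)
            if isinstance(func, ast.Attribute) and func.attr in COMMAND_EXECUTION_FUNCTIONS:
                return True
    return False


assert calls_dangerous_function("__import__('os').system('ls')")
assert not calls_dangerous_function("def solution():\n    return 1 + 1")
```

As the notice below says, static checks of this kind can be evaded by a sufficiently creative payload, which is why the opt-in flag exists at all.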
@@ -129,6 +129,36 @@ class PALChain(Chain):
     """Validations to perform on the generated code."""
     timeout: Optional[int] = 10
     """Timeout in seconds for the generated code to execute."""
+    allow_dangerous_code: bool = False
+    """This chain relies on the execution of generated code, which can be dangerous.
+
+    This class implements an AI technique that generates and evaluates
+    Python code, which can be dangerous and requires a specially sandboxed
+    environment to be safely used. While this class implements some basic guardrails
+    by limiting available locals/globals and by parsing and inspecting
+    the generated Python AST using `PALValidation`, those guardrails will not
+    deter sophisticated attackers and are not a replacement for a proper sandbox.
+    Do not use this class on untrusted inputs, with elevated permissions,
+    or without consulting your security team about proper sandboxing!
+
+    Failure to properly sandbox this class can lead to arbitrary code execution
+    vulnerabilities, which can lead to data breaches, data loss, or other security
+    incidents.
+    """
+
+    @root_validator(pre=False, skip_on_failure=True)
+    def post_init(cls, values: Dict) -> Dict:
+        if not values["allow_dangerous_code"]:
+            raise ValueError(
+                "This chain relies on the execution of generated code, "
+                "which can be dangerous. "
+                "Please read the security notice for this class, and only "
+                "use it if you understand the security implications. "
+                "If you want to proceed, you will need to opt-in, by setting "
+                "`allow_dangerous_code` to `True`."
+            )
+        return values
+
     class Config:
         """Configuration for this pydantic object."""