experimental[major]: Force users to opt in to code that relies on the Python REPL (#22860)

This should make it obvious that a few of the agents in langchain-experimental rely on the Python REPL as a tool under the hood, and it forces users to opt in explicitly.
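For downstream code, the opt-in now has to be made explicitly at construction time. A minimal sketch of what that looks like after this change, assuming allow_dangerous_code defaults to False and that the OpenAI wrapper is imported from langchain_openai (neither detail is shown in the diff below):

from langchain_openai import OpenAI
from langchain_experimental.pal_chain.base import PALChain

llm = OpenAI(temperature=0, max_tokens=512)

# PALChain executes LLM-generated Python in a REPL under the hood, so the
# caller has to acknowledge that risk explicitly when building the chain.
pal_chain = PALChain.from_math_prompt(
    llm,
    timeout=None,
    allow_dangerous_code=True,  # opt-in; only do this in a sandboxed environment
)

output = pal_chain.run(
    "If Cindy has four pets and Marcia has two more pets than Cindy, "
    "how many pets does Marcia have?"
)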
Authored by Eugene Yurtsev on 2024-06-13 15:41:24 -04:00; committed by GitHub
parent 869523ad72
commit ce0b0f22a1
7 changed files with 168 additions and 13 deletions

@@ -8,7 +8,7 @@ from langchain_experimental.pal_chain.base import PALChain
 def test_math_prompt() -> None:
     """Test math prompt."""
     llm = OpenAI(temperature=0, max_tokens=512)
-    pal_chain = PALChain.from_math_prompt(llm, timeout=None)
+    pal_chain = PALChain.from_math_prompt(llm, timeout=None, allow_dangerous_code=False)
     question = (
         "Jan has three times the number of pets as Marcia. "
         "Marcia has two more pets than Cindy. "
@@ -21,7 +21,9 @@ def test_math_prompt() -> None:
 def test_colored_object_prompt() -> None:
     """Test colored object prompt."""
     llm = OpenAI(temperature=0, max_tokens=512)
-    pal_chain = PALChain.from_colored_object_prompt(llm, timeout=None)
+    pal_chain = PALChain.from_colored_object_prompt(
+        llm, timeout=None, allow_dangerous_code=False
+    )
     question = (
         "On the desk, you see two blue booklets, "
         "two purple booklets, and two yellow pairs of sunglasses. "