mirror of
https://github.com/hwchase17/langchain.git
synced 2025-09-23 19:39:58 +00:00
experimental[major]: Force users to opt-in into code that relies on the python repl (#22860)
This should make it obvious that a few of the agents in langchain-experimental rely on the Python REPL as a tool under the hood, and will force users to opt in.
This commit is contained in:
@@ -8,7 +8,7 @@ from langchain_experimental.pal_chain.base import PALChain

 def test_math_prompt() -> None:
     """Test math prompt."""
     llm = OpenAI(temperature=0, max_tokens=512)
-    pal_chain = PALChain.from_math_prompt(llm, timeout=None)
+    pal_chain = PALChain.from_math_prompt(llm, timeout=None, allow_dangerous_code=False)
     question = (
         "Jan has three times the number of pets as Marcia. "
         "Marcia has two more pets than Cindy. "
@@ -21,7 +21,9 @@ def test_math_prompt() -> None:

 def test_colored_object_prompt() -> None:
     """Test colored object prompt."""
     llm = OpenAI(temperature=0, max_tokens=512)
-    pal_chain = PALChain.from_colored_object_prompt(llm, timeout=None)
+    pal_chain = PALChain.from_colored_object_prompt(
+        llm, timeout=None, allow_dangerous_code=False
+    )
     question = (
         "On the desk, you see two blue booklets, "
         "two purple booklets, and two yellow pairs of sunglasses. "
Reference in New Issue
Block a user