Harrison/verbose prompt (#159)

Add printing of prompt to LLMChain
This commit is contained in:
Harrison Chase 2022-11-19 20:39:35 -08:00 committed by GitHub
parent c02eb199b6
commit a19ad935b3
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 22 additions and 4 deletions

View File

@@ -12,17 +12,32 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 2, "execution_count": 1,
"id": "51a54c4d", "id": "51a54c4d",
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mQuestion: What NFL team won the Super Bowl in the year Justin Beiber was born?\n",
"\n",
"Answer: Let's think step by step.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{ {
"data": { "data": {
"text/plain": [ "text/plain": [
"' The year Justin Beiber was born was 1994. In 1994, the Dallas Cowboys won the Super Bowl.'" "' The year Justin Beiber was born was 1994. In 1994, the Dallas Cowboys won the Super Bowl.'"
] ]
}, },
"execution_count": 2, "execution_count": 1,
"metadata": {}, "metadata": {},
"output_type": "execute_result" "output_type": "execute_result"
} }
@@ -34,7 +49,7 @@
"\n", "\n",
"Answer: Let's think step by step.\"\"\"\n", "Answer: Let's think step by step.\"\"\"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n", "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
"llm_chain = LLMChain(prompt=prompt, llm=OpenAI(temperature=0))\n", "llm_chain = LLMChain(prompt=prompt, llm=OpenAI(temperature=0), verbose=True)\n",
"\n", "\n",
"question = \"What NFL team won the Super Bowl in the year Justin Beiber was born?\"\n", "question = \"What NFL team won the Super Bowl in the year Justin Beiber was born?\"\n",
"\n", "\n",

View File

@@ -4,6 +4,7 @@ from typing import Any, Dict, List
from pydantic import BaseModel, Extra from pydantic import BaseModel, Extra
from langchain.chains.base import Chain from langchain.chains.base import Chain
from langchain.input import print_text
from langchain.llms.base import LLM from langchain.llms.base import LLM
from langchain.prompts.base import BasePromptTemplate from langchain.prompts.base import BasePromptTemplate
@@ -53,7 +54,9 @@ class LLMChain(Chain, BaseModel):
def _call(self, inputs: Dict[str, Any]) -> Dict[str, str]: def _call(self, inputs: Dict[str, Any]) -> Dict[str, str]:
selected_inputs = {k: inputs[k] for k in self.prompt.input_variables} selected_inputs = {k: inputs[k] for k in self.prompt.input_variables}
prompt = self.prompt.format(**selected_inputs) prompt = self.prompt.format(**selected_inputs)
if self.verbose:
print("Prompt after formatting:")
print_text(prompt, color="green", end="\n")
kwargs = {} kwargs = {}
if "stop" in inputs: if "stop" in inputs:
kwargs["stop"] = inputs["stop"] kwargs["stop"] = inputs["stop"]