https://github.com/hwchase17/langchain.git
Harrison/callbacks (#1587)
commit 9f78717b3c
parent 90846dcc28
@@ -35,12 +35,28 @@
     "\n",
     "import langchain\n",
     "from langchain.agents import Tool, initialize_agent, load_tools\n",
+    "from langchain.chat_models import ChatOpenAI\n",
     "from langchain.llms import OpenAI"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 2,
+   "id": "1b62cd48",
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
+   "source": [
+    "# Agent run with tracing. Ensure that OPENAI_API_KEY is set appropriately to run this example.\n",
+    "\n",
+    "llm = OpenAI(temperature=0)\n",
+    "tools = load_tools([\"llm-math\"], llm=llm)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
    "id": "bfa16b79-aa4b-4d41-a067-70d1f593f667",
    "metadata": {
     "tags": []
@@ -70,16 +86,12 @@
       "'1.0891804557407723'"
      ]
     },
-    "execution_count": 2,
+    "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
-    "# Agent run with tracing. Ensure that OPENAI_API_KEY is set appropriately to run this example.\n",
-    "\n",
-    "llm = OpenAI(temperature=0)\n",
-    "tools = load_tools([\"llm-math\"], llm=llm)\n",
     "agent = initialize_agent(\n",
     "    tools, llm, agent=\"zero-shot-react-description\", verbose=True\n",
     ")\n",
@@ -87,10 +99,94 @@
    "agent.run(\"What is 2 raised to .123243 power?\")"
   ]
  },
+ {
+  "cell_type": "code",
+  "execution_count": 4,
+  "id": "4829eb1d",
+  "metadata": {},
+  "outputs": [
+   {
+    "name": "stdout",
+    "output_type": "stream",
+    "text": [
+     "\n",
+     "\n",
+     "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
+     "\u001b[32;1m\u001b[1;3mQuestion: What is 2 raised to .123243 power?\n",
+     "Thought: I need a calculator to solve this problem.\n",
+     "Action:\n",
+     "```\n",
+     "{\n",
+     "  \"action\": \"calculator\",\n",
+     "  \"action_input\": \"2^0.123243\"\n",
+     "}\n",
+     "```\n",
+     "\u001b[0m\n",
+     "Observation: calculator is not a valid tool, try another one.\n",
+     "\u001b[32;1m\u001b[1;3mI made a mistake, I need to use the correct tool for this question.\n",
+     "Action:\n",
+     "```\n",
+     "{\n",
+     "  \"action\": \"calculator\",\n",
+     "  \"action_input\": \"2^0.123243\"\n",
+     "}\n",
+     "```\n",
+     "\n",
+     "\u001b[0m\n",
+     "Observation: calculator is not a valid tool, try another one.\n",
+     "\u001b[32;1m\u001b[1;3mI made a mistake, the tool name is actually \"calc\" instead of \"calculator\".\n",
+     "Action:\n",
+     "```\n",
+     "{\n",
+     "  \"action\": \"calc\",\n",
+     "  \"action_input\": \"2^0.123243\"\n",
+     "}\n",
+     "```\n",
+     "\n",
+     "\u001b[0m\n",
+     "Observation: calc is not a valid tool, try another one.\n",
+     "\u001b[32;1m\u001b[1;3mI made another mistake, the tool name is actually \"Calculator\" instead of \"calc\".\n",
+     "Action:\n",
+     "```\n",
+     "{\n",
+     "  \"action\": \"Calculator\",\n",
+     "  \"action_input\": \"2^0.123243\"\n",
+     "}\n",
+     "```\n",
+     "\n",
+     "\u001b[0m\n",
+     "Observation: \u001b[36;1m\u001b[1;3mAnswer: 1.0891804557407723\n",
+     "\u001b[0m\n",
+     "Thought:\u001b[32;1m\u001b[1;3mThe final answer is 1.0891804557407723.\n",
+     "Final Answer: 1.0891804557407723\u001b[0m\n",
+     "\n",
+     "\u001b[1m> Finished chain.\u001b[0m\n"
+    ]
+   },
+   {
+    "data": {
+     "text/plain": [
+      "'1.0891804557407723'"
+     ]
+    },
+    "execution_count": 4,
+    "metadata": {},
+    "output_type": "execute_result"
+   }
+  ],
+  "source": [
+   "# Agent run with tracing using a chat model\n",
+   "agent = initialize_agent(\n",
+   "    tools, ChatOpenAI(temperature=0), agent=\"chat-zero-shot-react-description\", verbose=True\n",
+   ")\n",
+   "\n",
+   "agent.run(\"What is 2 raised to .123243 power?\")"
+  ]
+ },
  {
   "cell_type": "code",
   "execution_count": null,
-  "id": "25addd7f",
+  "id": "76abfd82",
   "metadata": {},
   "outputs": [],
   "source": []
@@ -112,7 +208,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.10.9"
+   "version": "3.9.1"
   }
  },
  "nbformat": 4,
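Taken together, the notebook hunks above split the original single cell into a setup cell (the LLM plus the `llm-math` tool), a completion-model agent cell, and a new chat-model agent cell. Here is a minimal sketch of the same flow as a plain script; it assumes the early-2023 langchain API used in this diff (string agent names passed to `initialize_agent`) and an `OPENAI_API_KEY` in the environment, and will not run against current langchain releases:

```python
# Minimal sketch of the notebook's flow as a plain script. Assumes the
# early-2023 langchain API used in this diff and an OPENAI_API_KEY env var.
from langchain.agents import initialize_agent, load_tools
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
tools = load_tools(["llm-math"], llm=llm)

# Completion-model agent, as in the first pair of cells.
agent = initialize_agent(
    tools, llm, agent="zero-shot-react-description", verbose=True
)
print(agent.run("What is 2 raised to .123243 power?"))

# Chat-model agent, as in the new cell this commit adds.
chat_agent = initialize_agent(
    tools,
    ChatOpenAI(temperature=0),
    agent="chat-zero-shot-react-description",
    verbose=True,
)
print(chat_agent.run("What is 2 raised to .123243 power?"))
```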
@@ -60,13 +60,44 @@ class BaseChatModel(BaseLanguageModel, BaseModel, ABC):
         self, prompts: List[PromptValue], stop: Optional[List[str]] = None
     ) -> LLMResult:
         prompt_messages = [p.to_messages() for p in prompts]
-        return self.generate(prompt_messages, stop=stop)
+        prompt_strings = [p.to_string() for p in prompts]
+        self.callback_manager.on_llm_start(
+            {"name": self.__class__.__name__}, prompt_strings, verbose=self.verbose
+        )
+        try:
+            output = self.generate(prompt_messages, stop=stop)
+        except (KeyboardInterrupt, Exception) as e:
+            self.callback_manager.on_llm_error(e, verbose=self.verbose)
+            raise e
+        self.callback_manager.on_llm_end(output, verbose=self.verbose)
+        return output
 
     async def agenerate_prompt(
         self, prompts: List[PromptValue], stop: Optional[List[str]] = None
     ) -> LLMResult:
         prompt_messages = [p.to_messages() for p in prompts]
-        return await self.agenerate(prompt_messages, stop=stop)
+        prompt_strings = [p.to_string() for p in prompts]
+        if self.callback_manager.is_async:
+            await self.callback_manager.on_llm_start(
+                {"name": self.__class__.__name__}, prompt_strings, verbose=self.verbose
+            )
+        else:
+            self.callback_manager.on_llm_start(
+                {"name": self.__class__.__name__}, prompt_strings, verbose=self.verbose
+            )
+        try:
+            output = await self.agenerate(prompt_messages, stop=stop)
+        except (KeyboardInterrupt, Exception) as e:
+            if self.callback_manager.is_async:
+                await self.callback_manager.on_llm_error(e, verbose=self.verbose)
+            else:
+                self.callback_manager.on_llm_error(e, verbose=self.verbose)
+            raise e
+        if self.callback_manager.is_async:
+            await self.callback_manager.on_llm_end(output, verbose=self.verbose)
+        else:
+            self.callback_manager.on_llm_end(output, verbose=self.verbose)
+        return output
 
     @abstractmethod
     def _generate(
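The hunk above wires `BaseChatModel.generate_prompt` and `agenerate_prompt` into the callback manager, with the async variant awaiting each hook only when `callback_manager.is_async` is set. The dispatch pattern is easy to exercise in isolation; below is a self-contained sketch in which `SyncManager`, `AsyncManager`, and `"FakeChatModel"` are hypothetical stand-ins, not langchain APIs, that only mirror the branching shown in the diff:

```python
# Self-contained sketch of the sync/async callback dispatch this hunk adds.
# SyncManager, AsyncManager, and "FakeChatModel" are hypothetical stand-ins.
import asyncio
from typing import Any, List


class SyncManager:
    """Stand-in manager with synchronous hooks (called directly)."""

    is_async = False

    def on_llm_start(self, serialized: dict, prompts: List[str]) -> None:
        print(f"[sync] start {serialized['name']}: {prompts}")

    def on_llm_end(self, output: Any) -> None:
        print(f"[sync] end: {output!r}")

    def on_llm_error(self, error: BaseException) -> None:
        print(f"[sync] error: {error!r}")


class AsyncManager:
    """Stand-in manager whose hooks are coroutines and must be awaited."""

    is_async = True

    async def on_llm_start(self, serialized: dict, prompts: List[str]) -> None:
        print(f"[async] start {serialized['name']}: {prompts}")

    async def on_llm_end(self, output: Any) -> None:
        print(f"[async] end: {output!r}")

    async def on_llm_error(self, error: BaseException) -> None:
        print(f"[async] error: {error!r}")


async def agenerate_prompt(manager: Any, prompts: List[str]) -> str:
    # Same shape as the diff: await each hook only when the manager is async.
    if manager.is_async:
        await manager.on_llm_start({"name": "FakeChatModel"}, prompts)
    else:
        manager.on_llm_start({"name": "FakeChatModel"}, prompts)
    try:
        output = "fake completion"  # stands in for `await self.agenerate(...)`
    except (KeyboardInterrupt, Exception) as e:
        if manager.is_async:
            await manager.on_llm_error(e)
        else:
            manager.on_llm_error(e)
        raise e
    if manager.is_async:
        await manager.on_llm_end(output)
    else:
        manager.on_llm_end(output)
    return output


asyncio.run(agenerate_prompt(SyncManager(), ["hello"]))
asyncio.run(agenerate_prompt(AsyncManager(), ["hello"]))
```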
@@ -7,6 +7,7 @@ from typing import Any, Callable, List, Sequence, Tuple, Type, Union
 
 from pydantic import BaseModel, Field
 
+from langchain.memory.buffer import get_buffer_string
 from langchain.prompts.base import BasePromptTemplate, StringPromptTemplate
 from langchain.prompts.prompt import PromptTemplate
 from langchain.schema import (
@@ -111,7 +112,7 @@ class ChatPromptValue(PromptValue):
 
     def to_string(self) -> str:
         """Return prompt as string."""
-        return str(self.messages)
+        return get_buffer_string(self.messages)
 
     def to_messages(self) -> List[BaseMessage]:
        """Return prompt as messages."""
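`get_buffer_string` (imported above from `langchain.memory.buffer`) renders a message list as role-prefixed lines instead of the `repr`-style dump that `str(self.messages)` produced. Below is a rough standalone approximation of that behavior, using a simplified stand-in `Message` type rather than langchain's message classes, which reproduces the expected string in the updated unit test after it:

```python
# Rough, standalone approximation of get_buffer_string's output format.
# Message is a simplified stand-in, not a langchain type.
from dataclasses import dataclass
from typing import List


@dataclass
class Message:
    role: str  # e.g. "System", "Human", "AI", or a custom role like "test"
    content: str


def buffer_string(messages: List[Message]) -> str:
    # Render each message as "Role: content" on its own line; this matches
    # the expected value in the updated unit test below.
    return "\n".join(f"{m.role}: {m.content}" for m in messages)


msgs = [
    Message("System", "Here's some context: context"),
    Message("Human", "Hello foo, I'm bar. Thanks for the context"),
    Message("AI", "I'm an AI. I'm foo. I'm bar."),
    Message("test", "I'm a generic message. I'm foo. I'm bar."),
]
print(buffer_string(msgs))
```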
@@ -70,12 +70,10 @@ def test_chat_prompt_template() -> None:
 
     string = prompt.to_string()
     expected = (
-        '[SystemMessage(content="Here\'s some context: context", '
-        'additional_kwargs={}), HumanMessage(content="Hello foo, '
-        "I'm bar. Thanks for the context\", additional_kwargs={}), "
-        "AIMessage(content=\"I'm an AI. I'm foo. I'm bar.\", additional_kwargs={}), "
-        "ChatMessage(content=\"I'm a generic message. I'm foo. I'm bar.\","
-        " additional_kwargs={}, role='test')]"
+        "System: Here's some context: context\n"
+        "Human: Hello foo, I'm bar. Thanks for the context\n"
+        "AI: I'm an AI. I'm foo. I'm bar.\n"
+        "test: I'm a generic message. I'm foo. I'm bar."
     )
     assert string == expected
 