- easier to write custom logic/loops with automatic tracing
- if you don't want streaming support, write a regular function and pass it to RunnableLambda (a minimal sketch of both paths follows the trace link below)
- if you do want streaming, write a generator and pass it to RunnableGenerator

```py
import json
from typing import AsyncIterator

from langchain_core.messages import BaseMessage, FunctionMessage, HumanMessage
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import Runnable, RunnableGenerator, RunnablePassthrough
from langchain_core.tools import BaseTool

from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain.chat_models import ChatOpenAI
from langchain.tools.render import format_tool_to_openai_function


def _get_tavily():
    from langchain.tools.tavily_search import TavilySearchResults
    from langchain.utilities.tavily_search import TavilySearchAPIWrapper

    tavily_search = TavilySearchAPIWrapper()
    return TavilySearchResults(api_wrapper=tavily_search)


async def _agent_executor_generator(
    input: AsyncIterator[list[BaseMessage]],
    *,
    max_iterations: int = 10,
    tools: dict[str, BaseTool],
    agent: Runnable[list[BaseMessage], BaseMessage],
    parser: Runnable[BaseMessage, AgentAction | AgentFinish],
) -> AsyncIterator[BaseMessage]:
    # Collect the input messages, then run the agent loop, streaming each
    # intermediate message as it is produced.
    messages = [m async for mm in input for m in mm]
    for _ in range(max_iterations):
        next_message = await agent.ainvoke(messages)
        yield next_message
        messages.append(next_message)

        # Did the model request a tool call, or is it done?
        parsed = await parser.ainvoke(next_message)
        if isinstance(parsed, AgentAction):
            result = await tools[parsed.tool].ainvoke(parsed.tool_input)
            next_message = FunctionMessage(name=parsed.tool, content=json.dumps(result))
            yield next_message
            messages.append(next_message)
        elif isinstance(parsed, AgentFinish):
            return


def get_agent_executor(tools: list[BaseTool], system_message: str):
    llm = ChatOpenAI(model="gpt-4-1106-preview", temperature=0, streaming=True)
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", system_message),
            MessagesPlaceholder(variable_name="messages"),
        ]
    )
    llm_with_tools = llm.bind(
        functions=[format_tool_to_openai_function(t) for t in tools]
    )
    agent = {"messages": RunnablePassthrough()} | prompt | llm_with_tools
    parser = OpenAIFunctionsAgentOutputParser()
    executor = RunnableGenerator(_agent_executor_generator)
    return executor.bind(
        # Map tool names to tools so the loop can look them up by name.
        tools={tool.name: tool for tool in tools},
        agent=agent,
        parser=parser,
    )


agent = get_agent_executor([_get_tavily()], "You are a very nice agent!")


async def main():
    async for message in agent.astream(
        [HumanMessage(content="whats the weather in sf tomorrow?")]
    ):
        print(message)


if __name__ == "__main__":
    import asyncio

    asyncio.run(main())
```

results in this trace: https://smith.langchain.com/public/fa17f05d-9724-4d08-8fa1-750f8fcd051b/r
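For contrast, here is a minimal sketch of the two paths side by side. The `shout` and `repeat` functions are hypothetical, not part of the example above; they just show that a plain function becomes a single-chunk runnable via RunnableLambda, while a generator wrapped in RunnableGenerator yields one stream chunk per `yield`.

```py
from typing import Iterator

from langchain_core.runnables import RunnableGenerator, RunnableLambda


# Non-streaming path: a regular function wrapped in RunnableLambda.
# It is traced automatically, but .stream() emits a single chunk.
def shout(text: str) -> str:
    return text.upper()


shout_runnable = RunnableLambda(shout)
print(shout_runnable.invoke("hello"))  # -> HELLO


# Streaming path: a generator wrapped in RunnableGenerator.
# The function receives an iterator of input chunks and each yield
# becomes an output chunk.
def repeat(texts: Iterator[str]) -> Iterator[str]:
    for text in texts:
        for word in text.split():
            yield word + " "


repeat_runnable = RunnableGenerator(repeat)
for chunk in repeat_runnable.stream("streaming is easy"):
    print(chunk)  # -> "streaming ", "is ", "easy " as separate chunks
```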
Files in this directory:

- __init__.py
- base.py
- branch.py
- config.py
- configurable.py
- fallbacks.py
- graph_draw.py
- graph.py
- history.py
- passthrough.py
- retry.py
- router.py
- utils.py