langchain/libs/core/langchain_core/runnables
Nuno Campos 9cbf14dec2
Fetch runnable config from context var inside runnable lambda and runnable generator (#15334)
- easier to write custom logic/loops with automatic tracing
- if you don't want streaming support, write a regular function and pass it to
RunnableLambda
- if you do want streaming, write a generator and pass it to
RunnableGenerator (a minimal sketch of both options follows this list)
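
A minimal sketch of the two options (the function names here are illustrative, not part of the PR):

```py
from typing import Iterator

from langchain_core.runnables import RunnableGenerator, RunnableLambda


def add_exclamation(text: str) -> str:
    # Plain function: no streaming, but still traced when wrapped.
    return text + "!"


def upper_chunks(chunks: Iterator[str]) -> Iterator[str]:
    # Generator: transforms the stream chunk by chunk.
    for chunk in chunks:
        yield chunk.upper()


non_streaming = RunnableLambda(add_exclamation)  # non_streaming.invoke("hi") -> "hi!"
streaming = RunnableGenerator(upper_chunks)      # streaming.stream(...) yields upper-cased chunks
```

The full agent-executor example below uses the RunnableGenerator pattern: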

```py
import json
from typing import AsyncIterator

from langchain_core.messages import BaseMessage, FunctionMessage, HumanMessage
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import Runnable, RunnableGenerator, RunnablePassthrough
from langchain_core.tools import BaseTool

from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain.chat_models import ChatOpenAI
from langchain.tools.render import format_tool_to_openai_function


def _get_tavily():
    from langchain.tools.tavily_search import TavilySearchResults
    from langchain.utilities.tavily_search import TavilySearchAPIWrapper

    tavily_search = TavilySearchAPIWrapper()
    return TavilySearchResults(api_wrapper=tavily_search)


async def _agent_executor_generator(
    input: AsyncIterator[list[BaseMessage]],
    *,
    max_iterations: int = 10,
    tools: dict[str, BaseTool],
    agent: Runnable[list[BaseMessage], BaseMessage],
    parser: Runnable[BaseMessage, AgentAction | AgentFinish],
) -> AsyncIterator[BaseMessage]:
    # Drain the input stream into a flat message list before starting the loop.
    messages = [m async for mm in input for m in mm]
    for _ in range(max_iterations):
        next_message = await agent.ainvoke(messages)
        yield next_message
        messages.append(next_message)

        parsed = await parser.ainvoke(next_message)
        if isinstance(parsed, AgentAction):
            # Run the requested tool and feed its result back as a FunctionMessage.
            result = await tools[parsed.tool].ainvoke(parsed.tool_input)
            next_message = FunctionMessage(name=parsed.tool, content=json.dumps(result))
            yield next_message
            messages.append(next_message)
        elif isinstance(parsed, AgentFinish):
            # The agent produced a final answer; stop iterating.
            return


def get_agent_executor(tools: list[BaseTool], system_message: str):
    llm = ChatOpenAI(model="gpt-4-1106-preview", temperature=0, streaming=True)
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", system_message),
            MessagesPlaceholder(variable_name="messages"),
        ]
    )
    llm_with_tools = llm.bind(
        functions=[format_tool_to_openai_function(t) for t in tools]
    )

    agent = {"messages": RunnablePassthrough()} | prompt | llm_with_tools
    parser = OpenAIFunctionsAgentOutputParser()
    executor = RunnableGenerator(_agent_executor_generator)
    return executor.bind(
        tools={tool.name: tool for tool in tools}, agent=agent, parser=parser
    )


agent = get_agent_executor([_get_tavily()], "You are a very nice agent!")


async def main():
    async for message in agent.astream(
        [HumanMessage(content="whats the weather in sf tomorrow?")]
    ):
        print(message)


if __name__ == "__main__":
    import asyncio

    asyncio.run(main())
```

This results in the following trace:
https://smith.langchain.com/public/fa17f05d-9724-4d08-8fa1-750f8fcd051b/r
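
Note that the nested `agent.ainvoke`/`parser.ainvoke` calls above are traced as children even though no config is passed explicitly: per this PR, the generator's config is stashed in a context var. A hedged sketch of the effect, using `ensure_config()`:

```py
from langchain_core.runnables import RunnableLambda
from langchain_core.runnables.config import ensure_config


def show_tags(_: str) -> str:
    # ensure_config() picks up the parent config from the context var,
    # even though this function never receives a config argument.
    return str(ensure_config().get("tags"))


print(RunnableLambda(show_tags).invoke("hi", config={"tags": ["demo"]}))
# -> "['demo']"  (the wrapped function sees the caller's config)
```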
2024-01-02 12:16:39 -08:00
| File | Last commit | Date |
| --- | --- | --- |
| __init__.py | Fetch runnable config from context var inside runnable lambda and runnable generator (#15334) | 2024-01-02 12:16:39 -08:00 |
| base.py | Fetch runnable config from context var inside runnable lambda and runnable generator (#15334) | 2024-01-02 12:16:39 -08:00 |
| branch.py | core(minor): Implement stream and astream for RunnableBranch (#14805) | 2023-12-20 15:37:56 -05:00 |
| config.py | Fetch runnable config from context var inside runnable lambda and runnable generator (#15334) | 2024-01-02 12:16:39 -08:00 |
| configurable.py | Propagate context vars in all classes/methods | 2023-12-29 12:34:03 -08:00 |
| fallbacks.py | manual mapping (#14422) | 2023-12-08 16:29:33 -08:00 |
| graph_draw.py | Add Runnable.get_graph() to get a graph representation of a Runnable (#15040) | 2023-12-22 11:40:45 -08:00 |
| graph.py | [core] print ascii (#15179) | 2023-12-26 11:43:14 -08:00 |
| history.py | Propagate context vars in all classes/methods | 2023-12-29 12:34:03 -08:00 |
| passthrough.py | Propagate context vars in all classes/methods | 2023-12-29 12:34:03 -08:00 |
| retry.py | manual mapping (#14422) | 2023-12-08 16:29:33 -08:00 |
| router.py | manual mapping (#14422) | 2023-12-08 16:29:33 -08:00 |
| utils.py | Fetch runnable config from context var inside runnable lambda and runnable generator (#15334) | 2024-01-02 12:16:39 -08:00 |