langchain/libs/core/tests/unit_tests
Nuno Campos 9cbf14dec2
Fetch runnable config from context var inside runnable lambda and runnable generator (#15334)
- easier to write custom logic/loops with automatic tracing
- if you don't want streaming support, write a regular function and
pass it to RunnableLambda
- if you do want streaming write a generator and pass it to
RunnableGenerator

```py
import json
from typing import AsyncIterator

from langchain_core.messages import BaseMessage, FunctionMessage, HumanMessage
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import Runnable, RunnableGenerator, RunnablePassthrough
from langchain_core.tools import BaseTool

from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain.chat_models import ChatOpenAI
from langchain.tools.render import format_tool_to_openai_function


def _get_tavily():
    """Build a Tavily search tool backed by a freshly constructed API wrapper."""
    from langchain.tools.tavily_search import TavilySearchResults
    from langchain.utilities.tavily_search import TavilySearchAPIWrapper

    wrapper = TavilySearchAPIWrapper()
    return TavilySearchResults(api_wrapper=wrapper)


async def _agent_executor_generator(
    input: AsyncIterator[list[BaseMessage]],
    *,
    max_iterations: int = 10,
    tools: dict[str, BaseTool],
    agent: Runnable[list[BaseMessage], BaseMessage],
    parser: Runnable[BaseMessage, AgentAction | AgentFinish],
) -> AsyncIterator[BaseMessage]:
    """Drive the agent loop, yielding every message as it is produced.

    Flattens the incoming batches into one conversation, then alternates
    between asking the agent for its next message and executing any tool
    call it requests, until the parser signals the agent is finished or
    ``max_iterations`` rounds have run.
    """
    # Flatten the async stream of message lists into a single history.
    messages: list[BaseMessage] = []
    async for batch in input:
        messages.extend(batch)

    for _round in range(max_iterations):
        reply = await agent.ainvoke(messages)
        yield reply
        messages.append(reply)

        step = await parser.ainvoke(reply)
        if isinstance(step, AgentFinish):
            # Agent declared itself done; stop streaming.
            return
        if isinstance(step, AgentAction):
            # Execute the requested tool and feed its result back in.
            observation = await tools[step.tool].ainvoke(step.tool_input)
            tool_message = FunctionMessage(
                name=step.tool, content=json.dumps(observation)
            )
            yield tool_message
            messages.append(tool_message)


def get_agent_executor(tools: list[BaseTool], system_message: str):
    """Assemble a streaming agent executor over the given tools.

    Args:
        tools: Tools the agent may call; the executor looks them up by
            ``tool.name`` at runtime.
        system_message: System prompt prepended to the conversation.

    Returns:
        A bound ``RunnableGenerator`` that streams the agent's messages.
    """
    llm = ChatOpenAI(model="gpt-4-1106-preview", temperature=0, streaming=True)
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", system_message),
            MessagesPlaceholder(variable_name="messages"),
        ]
    )
    llm_with_tools = llm.bind(
        functions=[format_tool_to_openai_function(t) for t in tools]
    )

    agent = {"messages": RunnablePassthrough()} | prompt | llm_with_tools
    parser = OpenAIFunctionsAgentOutputParser()
    executor = RunnableGenerator(_agent_executor_generator)
    # BUG FIX: the generator indexes ``tools[parsed.tool]``, so it needs a
    # name -> tool mapping. The original passed a *set* comprehension
    # ({tool.name for tool in tools}), which would raise TypeError on lookup.
    return executor.bind(
        tools={tool.name: tool for tool in tools}, agent=agent, parser=parser
    )


agent = get_agent_executor([_get_tavily()], "You are a very nice agent!")


async def main():
    """Stream and print the demo agent's answer to a sample question."""
    question = [HumanMessage(content="whats the weather in sf tomorrow?")]
    async for chunk in agent.astream(question):
        print(chunk)


if __name__ == "__main__":
    # Run the async demo when executed as a script.
    import asyncio

    asyncio.run(main())
```

results in this trace
https://smith.langchain.com/public/fa17f05d-9724-4d08-8fa1-750f8fcd051b/r
2024-01-02 12:16:39 -08:00
..
_api REFACTOR: Refactor langchain_core (#13627) 2023-11-21 08:35:29 -08:00
callbacks Calculate trace_id and dotted_order client side (#15351) 2024-01-02 10:13:34 -08:00
data
documents REFACTOR: combine core documents files (#13733) 2023-11-22 10:10:26 -08:00
example_selectors BUG: more core fixes (#13665) 2023-11-21 15:15:48 -08:00
examples
fake core[patch]: add response kwarg to on_llm_error 2023-12-04 15:04:48 -08:00
language_models [core: minor] fix getters (#15181) 2023-12-26 12:32:55 -08:00
load BUG: more core fixes (#13665) 2023-11-21 15:15:48 -08:00
messages Fixes for opengpts release (#13960) 2023-11-28 21:49:43 +00:00
output_parsers [core] add test for json parser (#15297) 2023-12-29 09:59:39 -08:00
outputs BUG: more core fixes (#13665) 2023-11-21 15:15:48 -08:00
prompts [core] prompt changes (#15186) 2023-12-26 15:52:17 -08:00
runnables Fetch runnable config from context var inside runnable lambda and runnable generator (#15334) 2024-01-02 12:16:39 -08:00
tracers core[patch], langchain[patch]: fix required deps (#14373) 2023-12-07 14:24:58 -08:00
utils Update _retrieve_ref inside json_schema.py to include an isdigit() check (#14745) 2024-01-01 16:25:42 -08:00
__init__.py
conftest.py core[patch], langchain[patch]: fix required deps (#14373) 2023-12-07 14:24:58 -08:00
prompt_file.txt
test_globals.py
test_imports.py infra: Fix test filesystem paths incompatible with windows (#14388) 2023-12-21 13:45:42 -08:00
test_messages.py Fix tool_calls message merge (#14613) 2023-12-13 12:37:40 -08:00
test_outputs.py REFACTOR: Refactor langchain_core (#13627) 2023-11-21 08:35:29 -08:00
test_tools.py REFACTOR: Refactor langchain_core (#13627) 2023-11-21 08:35:29 -08:00