mirror of
https://github.com/hwchase17/langchain.git
synced 2025-07-13 16:36:06 +00:00
fix: agenerate missing run_manager args in llm.py (#4566)
# fix: agenerate missing run_manager args in llm.py <!-- Thank you for contributing to LangChain! Your PR will appear in our next release under the title you set. Please make sure it highlights your valuable contribution. Replace this with a description of the change, the issue it fixes (if applicable), and relevant context. List any dependencies required for this change. After you're done, someone will review your PR. They may suggest improvements. If no one reviews your PR within a few days, feel free to @-mention the same people again, as notifications can get lost. --> <!-- Remove if not applicable --> Fixes # (issue) fix: agenerate missing run_manager args in llm.py <!-- For a quicker response, figure out the right person to tag with @ @hwchase17 - project lead Tracing / Callbacks - @agola11 Async - @agola11 DataLoaders - @eyurtsev Models - @hwchase17 - @agola11 Agents / Tools / Toolkits - @vowelparrot VectorStores / Retrievers / Memory - @dev2049 -->
This commit is contained in:
parent
4e56d3119c
commit
bcffc704c1
@ -86,7 +86,7 @@ class LLMChain(Chain):
|
|||||||
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
|
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
|
||||||
) -> LLMResult:
|
) -> LLMResult:
|
||||||
"""Generate LLM result from inputs."""
|
"""Generate LLM result from inputs."""
|
||||||
prompts, stop = await self.aprep_prompts(input_list)
|
prompts, stop = await self.aprep_prompts(input_list, run_manager=run_manager)
|
||||||
return await self.llm.agenerate_prompt(
|
return await self.llm.agenerate_prompt(
|
||||||
prompts, stop, callbacks=run_manager.get_child() if run_manager else None
|
prompts, stop, callbacks=run_manager.get_child() if run_manager else None
|
||||||
)
|
)
|
||||||
|
Loading…
Reference in New Issue
Block a user