diff --git a/pilot/scene/base_chat.py b/pilot/scene/base_chat.py
index 88f457935..726db8304 100644
--- a/pilot/scene/base_chat.py
+++ b/pilot/scene/base_chat.py
@@ -178,7 +178,7 @@ class BaseChat(ABC):

     async def nostream_call(self):
         payload = self.__call_base()
-        logger.info(f"Requert: \n{payload}")
+        logger.info(f"Request: \n{payload}")
         ai_response_text = ""
         try:
             from pilot.model.worker.manager import worker_manager
@@ -231,12 +231,17 @@ class BaseChat(ABC):
                 except StopAsyncIteration:
                     break

+
     def _blocking_nostream_call(self):
         logger.warn(
             "_blocking_nostream_call is only temporarily used in webserver and will be deleted soon, please use nostream_call to replace it for higher performance"
         )
         loop = get_or_create_event_loop()
-        return loop.run_until_complete(self.nostream_call())
+        try:
+            return loop.run_until_complete(self.nostream_call())
+        finally:
+            loop.close()
+

     def call(self):
         if self.prompt_template.stream_out:
@@ -244,7 +249,7 @@ class BaseChat(ABC):
         else:
             return self._blocking_nostream_call()

-    def prepare(self):
+    async def prepare(self):
         pass

     def generate_llm_text(self) -> str:
diff --git a/pilot/scene/chat_data/chat_excel/excel_analyze/chat.py b/pilot/scene/chat_data/chat_excel/excel_analyze/chat.py
index a3f4dc0b2..7f6ae856c 100644
--- a/pilot/scene/chat_data/chat_excel/excel_analyze/chat.py
+++ b/pilot/scene/chat_data/chat_excel/excel_analyze/chat.py
@@ -1,6 +1,6 @@
 import json
 import os
-
+import asyncio
 from typing import List, Any, Dict

 from pilot.scene.base_message import (
@@ -81,7 +81,7 @@ class ChatExcel(BaseChat):
         }
         return input_values

-    def prepare(self):
+    async def prepare(self):
         logger.info(f"{self.chat_mode} prepare start!")
         if len(self.history_message) > 0:
             return None
@@ -93,7 +93,7 @@ class ChatExcel(BaseChat):
             "excel_reader": self.excel_reader,
         }
         learn_chat = ExcelLearning(**chat_param)
-        result = learn_chat.call()
+        result = await learn_chat.nostream_call()
         return result

     def do_action(self, prompt_response):