Mirror of https://github.com/csunny/DB-GPT.git (synced 2025-09-16 22:51:24 +00:00)
Merge branch 'llm_proxy' into dev

# Conflicts:
#	pilot/server/webserver.py
@@ -85,13 +85,13 @@ class CodeGenChatAdapter(BaseChatAdpter):
 
 
 class GuanacoChatAdapter(BaseChatAdpter):
-    """Model chat adapter for Guanaco """
+    """Model chat adapter for Guanaco"""
 
     def match(self, model_path: str):
         return "guanaco" in model_path
 
     def get_generate_stream_func(self):
         # TODO
         pass
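The Guanaco adapter's get_generate_stream_func is left as a TODO in this commit. Below is a minimal sketch of how it might eventually be completed, following the lazy-import pattern the ProxyllmChatAdapter hunk introduces next; the module path and the guanaco_generate_stream name are assumptions, not part of this commit (BaseChatAdpter is spelled as in the source):

```python
# Hypothetical completion of the TODO above. The module path and the
# guanaco_generate_stream name are assumed, not taken from this commit.
class GuanacoChatAdapter(BaseChatAdpter):
    """Model chat adapter for Guanaco"""

    def match(self, model_path: str):
        return "guanaco" in model_path

    def get_generate_stream_func(self):
        # Defer the import so the Guanaco backend is only loaded when
        # this adapter is actually selected (mirrors the proxyllm hunk below).
        from pilot.model.guanaco_llm import guanaco_generate_stream  # assumed path
        return guanaco_generate_stream
```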
@@ -101,7 +101,8 @@ class ProxyllmChatAdapter(BaseChatAdpter):
 
     def get_generate_stream_func(self):
+        from pilot.model.proxy_llm import proxyllm_generate_stream
         return proxyllm_generate_stream
 
 
 register_llm_model_chat_adapter(VicunaChatAdapter)
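Note that the added import sits inside the method body, so pilot.model.proxy_llm is only imported when the proxy adapter is actually selected. The register_llm_model_chat_adapter call suggests a simple registry: adapter classes are collected in order, and the serving layer picks the first one whose match() accepts the model path. A minimal sketch of that mechanism follows; the registry list and the get_llm_chat_adapter helper are assumed names that do not appear in this diff:

```python
# Sketch of the adapter registry implied by register_llm_model_chat_adapter.
# The list name and the lookup helper are assumptions.
llm_model_chat_adapters = []


def register_llm_model_chat_adapter(cls):
    """Register a chat adapter class; registration order decides precedence."""
    llm_model_chat_adapters.append(cls())


def get_llm_chat_adapter(model_path: str):
    """Return the first registered adapter whose match() accepts the path."""
    for adapter in llm_model_chat_adapters:
        if adapter.match(model_path):
            return adapter
    raise ValueError(f"Invalid model adapter for {model_path}")
```

With this in place, a server can resolve the right stream function at request time via get_llm_chat_adapter(model_path).get_generate_stream_func().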
@@ -37,7 +37,7 @@ class ModelWorker:
         self.model, self.tokenizer = self.ml.loader(
             num_gpus, load_8bit=ISLOAD_8BIT, debug=ISDEBUG
         )
 
         if not isinstance(self.model, str):
             if hasattr(self.model.config, "max_sequence_length"):
                 self.context_len = self.model.config.max_sequence_length
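The ModelWorker hunk reads the context window from the model config only when the loader returned a real model object (a plain string indicates a proxy model). A sketch of that initialization with a common fallback chain; the constructor signature, the max_position_embeddings branch, and the 2048 default are assumptions beyond what this hunk shows:

```python
# Sketch of ModelWorker context-length detection for a transformers-style
# config. Only the max_sequence_length branch appears in this hunk; the
# fallback branch and the 2048 default are assumptions.
ISLOAD_8BIT = False  # these flags appear in the hunk; values here are placeholders
ISDEBUG = False


class ModelWorker:
    def __init__(self, num_gpus, ml):
        self.ml = ml  # model loader passed in; construction details omitted
        self.model, self.tokenizer = self.ml.loader(
            num_gpus, load_8bit=ISLOAD_8BIT, debug=ISDEBUG
        )

        self.context_len = 2048  # assumed default when the config declares nothing
        if not isinstance(self.model, str):  # proxy models load as plain strings
            config = self.model.config
            if hasattr(config, "max_sequence_length"):
                self.context_len = config.max_sequence_length
            elif hasattr(config, "max_position_embeddings"):
                self.context_len = config.max_position_embeddings
```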