Mirror of https://github.com/csunny/DB-GPT.git (synced 2025-09-06 19:40:13 +00:00)

feat(model): Support gemma-2 model (#1675)

This commit adds a Gemma2Adapter for the google/gemma-2-9b-it and
google/gemma-2-27b-it checkpoints, adds a DeepseekCoderV2Adapter, moves model
max-length detection onto the adapter via a new parse_max_length method,
changes the Hugging Face chat streaming default for do_sample to True, reads a
custom_stop_words parameter, and deletes some commented-out dead code.
@@ -112,6 +112,31 @@ class LLMModelAdapter(ABC):
         """Load model and tokenizer"""
         raise NotImplementedError
 
+    def parse_max_length(self, model, tokenizer) -> Optional[int]:
+        """Parse the max_length of the model.
+
+        Returns:
+            Optional[int]: The max_length of the model
+        """
+        if not (tokenizer or model):
+            return None
+        try:
+            model_max_length = None
+            if tokenizer and hasattr(tokenizer, "model_max_length"):
+                model_max_length = tokenizer.model_max_length
+            if model_max_length and model_max_length < 100000000:
+                # Can't be too large
+                return model_max_length
+            if model and hasattr(model, "config"):
+                model_config = model.config
+                if hasattr(model_config, "max_sequence_length"):
+                    return model_config.max_sequence_length
+                if hasattr(model_config, "max_position_embeddings"):
+                    return model_config.max_position_embeddings
+            return None
+        except Exception:
+            return None
+
     def load_from_params(self, params):
         """Load the model and tokenizer according to the given parameters"""
         raise NotImplementedError
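Note: parse_max_length prefers the tokenizer's model_max_length, but only when
it is below the 100000000 guard (Hugging Face tokenizers report a huge sentinel
when no limit is configured), then falls back to the model config. A minimal
sketch of that fallback order with SimpleNamespace stand-ins; `adapter` is
assumed to be any concrete LLMModelAdapter instance:

    from types import SimpleNamespace

    # 1) A sane tokenizer.model_max_length wins.
    tok = SimpleNamespace(model_max_length=8192)
    assert adapter.parse_max_length(None, tok) == 8192

    # 2) An unset tokenizer limit is a huge sentinel (>= 100000000), so the
    #    method falls through to the model config fields.
    tok = SimpleNamespace(model_max_length=int(1e30))
    mdl = SimpleNamespace(config=SimpleNamespace(max_position_embeddings=4096))
    assert adapter.parse_max_length(mdl, tok) == 4096

    # 3) Nothing usable anywhere -> None.
    assert adapter.parse_max_length(None, None) is None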
@@ -73,6 +73,10 @@ class NewHFChatModelAdapter(LLMModelAdapter, ABC):
             ) from exc
         self.check_dependencies()
 
+        logger.info(
+            f"Load model from {model_path}, from_pretrained_kwargs: {from_pretrained_kwargs}"
+        )
+
         revision = from_pretrained_kwargs.get("revision", "main")
         try:
             tokenizer = AutoTokenizer.from_pretrained(
@@ -235,6 +239,43 @@ class GemmaAdapter(NewHFChatModelAdapter):
             )
 
 
+class Gemma2Adapter(NewHFChatModelAdapter):
+    """
+    https://huggingface.co/google/gemma-2-27b-it
+    https://huggingface.co/google/gemma-2-9b-it
+    """
+
+    support_4bit: bool = True
+    support_8bit: bool = True
+    support_system_message: bool = False
+
+    def use_fast_tokenizer(self) -> bool:
+        return True
+
+    def check_transformer_version(self, current_version: str) -> None:
+        if not current_version >= "4.42.1":
+            raise ValueError(
+                "Gemma2 require transformers.__version__>=4.42.1, please upgrade your transformers package."
+            )
+
+    def do_match(self, lower_model_name_or_path: Optional[str] = None):
+        return (
+            lower_model_name_or_path
+            and "gemma-2-" in lower_model_name_or_path
+            and "it" in lower_model_name_or_path
+        )
+
+    def load(self, model_path: str, from_pretrained_kwargs: dict):
+        import torch
+
+        if not from_pretrained_kwargs:
+            from_pretrained_kwargs = {}
+        from_pretrained_kwargs["torch_dtype"] = torch.bfloat16
+        # from_pretrained_kwargs["revision"] = "float16"
+        model, tokenizer = super().load(model_path, from_pretrained_kwargs)
+        return model, tokenizer
+
+
 class StarlingLMAdapter(NewHFChatModelAdapter):
     """
     https://huggingface.co/Nexusflow/Starling-LM-7B-beta
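Note: Gemma2Adapter matches by plain substring tests, forces torch.bfloat16
(the Gemma 2 weights are published in bfloat16), and requires transformers in
the 4.42 series, where Gemma 2 support landed; the version check is a
lexicographic string comparison, so it is only approximate. A quick sketch of
the matching rule, assuming the adapter can be instantiated directly (the
registry lookup does the equivalent internally):

    adapter = Gemma2Adapter()
    assert adapter.do_match("google/gemma-2-9b-it")
    assert adapter.do_match("google/gemma-2-27b-it")
    # Base (non "-it") checkpoints are left to other adapters:
    assert not adapter.do_match("google/gemma-2-9b")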
@@ -416,6 +457,17 @@ class DeepseekV2Adapter(NewHFChatModelAdapter):
         return model, tokenizer
 
 
+class DeepseekCoderV2Adapter(DeepseekV2Adapter):
+    def do_match(self, lower_model_name_or_path: Optional[str] = None):
+        return (
+            lower_model_name_or_path
+            and "deepseek" in lower_model_name_or_path
+            and "coder" in lower_model_name_or_path
+            and "v2" in lower_model_name_or_path
+            and "instruct" in lower_model_name_or_path
+        )
+
+
 class SailorAdapter(QwenAdapter):
     """
     https://huggingface.co/sail/Sailor-14B-Chat
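Note: DeepseekCoderV2Adapter reuses the DeepseekV2Adapter loading path and only
narrows the match. Because the match is substring testing, both the full and
Lite instruct checkpoints qualify (same instantiation assumption as the
previous sketch):

    adapter = DeepseekCoderV2Adapter()
    assert adapter.do_match("deepseek-ai/deepseek-coder-v2-instruct")
    assert adapter.do_match("deepseek-ai/deepseek-coder-v2-lite-instruct")
    assert not adapter.do_match("deepseek-ai/deepseek-v2-chat")  # no "coder"/"instruct"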
@@ -520,11 +572,13 @@ register_model_adapter(Yi15Adapter)
 register_model_adapter(Mixtral8x7BAdapter)
 register_model_adapter(SOLARAdapter)
 register_model_adapter(GemmaAdapter)
+register_model_adapter(Gemma2Adapter)
 register_model_adapter(StarlingLMAdapter)
 register_model_adapter(QwenAdapter)
 register_model_adapter(QwenMoeAdapter)
 register_model_adapter(Llama3Adapter)
 register_model_adapter(DeepseekV2Adapter)
+register_model_adapter(DeepseekCoderV2Adapter)
 register_model_adapter(SailorAdapter)
 register_model_adapter(PhiAdapter)
 register_model_adapter(SQLCoderAdapter)
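Note: both new adapters are registered alongside the existing ones. The actual
lookup lives elsewhere in dbgpt, but conceptually resolution is a first-match
walk over the registered adapters; a hypothetical sketch (_resolve_adapter is
illustrative, not a real dbgpt function):

    def _resolve_adapter(model_name_or_path, adapters):
        """Return the first registered adapter whose do_match accepts the name."""
        lowered = model_name_or_path.lower()
        for adapter in adapters:
            if adapter.do_match(lowered):
                return adapter
        return None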
@@ -116,7 +116,9 @@ class DefaultModelWorker(ModelWorker):
         self.model, self.tokenizer = self.ml.loader_with_params(
             model_params, self.llm_adapter
         )
-        model_max_length = _parse_model_max_length(self.model, self.tokenizer)
+        model_max_length = self.llm_adapter.parse_max_length(
+            self.model, self.tokenizer
+        )
         if model_max_length:
             logger.info(
                 f"Parse model max length {model_max_length} from model {self.model_name}."
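Note: the worker now asks the adapter rather than a module-level helper, so an
adapter can override detection, e.g. to pin a known context window (a
hypothetical subclass for illustration only):

    class PinnedMaxLengthAdapter(NewHFChatModelAdapter):
        """Trust neither tokenizer nor config; report a fixed context window."""

        def parse_max_length(self, model, tokenizer):
            return 8192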
@@ -21,7 +21,7 @@ def huggingface_chat_generate_stream(
     echo = params.get("echo", False)
     max_new_tokens = int(params.get("max_new_tokens", 2048))
     stop_token_ids = params.get("stop_token_ids", [])
-    do_sample = params.get("do_sample", None)
+    do_sample = params.get("do_sample", True)
+    custom_stop_words = params.get("custom_stop_words", [])
 
     input_ids = tokenizer(prompt).input_ids
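Note: do_sample now defaults to True, so sampling is on unless a caller
disables it. This excerpt only shows custom_stop_words being read (and logged
further down); a typical consumer, sketched hypothetically here, truncates the
streamed text at the first stop word:

    def _truncate_at_stop_word(text: str, stop_words):
        """Cut text at the first custom stop word; return (text, finished)."""
        for word in stop_words:
            idx = text.find(word)
            if idx >= 0:
                return text[:idx], True
        return text, False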
@@ -34,11 +34,6 @@ def huggingface_chat_generate_stream(
     input_echo_len = len(input_ids)
     input_ids = torch.as_tensor([input_ids], device=device)
 
-    # messages = params["messages"]
-    # messages = ModelMessage.to_openai_messages(messages)
-    # input_ids = tokenizer.apply_chat_template(conversation=messages, tokenize=True, add_generation_prompt=True, return_tensors='pt')
-    # input_ids = input_ids.to(device)
-
     streamer = TextIteratorStreamer(
         tokenizer, skip_prompt=not echo, skip_special_tokens=True
     )
@@ -55,7 +50,9 @@ def huggingface_chat_generate_stream(
     if do_sample is not None:
         base_kwargs["do_sample"] = do_sample
 
-    logger.info(f"Predict with parameters: {base_kwargs}")
+    logger.info(
+        f"Predict with parameters: {base_kwargs}\ncustom_stop_words: {custom_stop_words}"
+    )
 
     generate_kwargs = {"input_ids": input_ids, **base_kwargs}
     thread = Thread(target=model.generate, kwargs=generate_kwargs)