feat(model): Support llama3.1 models (#1744)

This commit is contained in:
Fangyin Cheng
2024-07-24 11:00:05 +08:00
committed by GitHub
parent 4149252321
commit 3c5ed9d8c1
10 changed files with 125 additions and 2 deletions

View File

@@ -403,7 +403,12 @@ class Llama3Adapter(NewHFChatModelAdapter):
support_8bit: bool = True
def do_match(self, lower_model_name_or_path: Optional[str] = None) -> bool:
    """Return True when the path/name looks like a Llama 3 *instruct* model.

    Llama 3.1 names also contain "llama-3", so "3.1" is explicitly
    excluded here and handled by the dedicated Llama31Adapter instead.

    Note: the pre-change single-condition return that preceded this one
    was a leftover (unreachable) duplicate and has been removed.
    """
    # bool() so callers always get a real boolean rather than None/"".
    return bool(
        lower_model_name_or_path
        and "llama-3" in lower_model_name_or_path
        and "instruct" in lower_model_name_or_path
        and "3.1" not in lower_model_name_or_path
    )
def get_str_prompt(
self,
@@ -431,6 +436,22 @@ class Llama3Adapter(NewHFChatModelAdapter):
return str_prompt
class Llama31Adapter(Llama3Adapter):
    """Adapter for Meta Llama 3.1 instruct models.

    Prompt construction is inherited from Llama3Adapter; only the model-name
    matching and the minimum transformers-version check differ.
    """

    def check_transformer_version(self, current_version: str) -> None:
        """Raise ValueError if transformers is older than 4.43.0.

        Versions are compared numerically component-by-component. The
        previous plain string comparison (`current_version >= "4.43.0"`)
        was wrong: e.g. "4.9.0" >= "4.43.0" is True lexicographically
        even though 4.9.0 < 4.43.0 numerically.
        """
        logger.info(f"Checking transformers version: Current version {current_version}")
        if self._version_tuple(current_version) < (4, 43, 0):
            raise ValueError(
                "Llama-3.1 require transformers.__version__>=4.43.0, please upgrade your transformers package."
            )

    @staticmethod
    def _version_tuple(version: str) -> tuple:
        """Parse the leading numeric dotted part of *version* into an int tuple.

        Non-numeric suffixes (e.g. "4.43.0.dev0") stop parsing at the first
        non-digit character of a component; an unparseable string yields (0,)
        so the caller treats it as too old rather than crashing.
        """
        parts = []
        for piece in version.split("."):
            digits = ""
            for ch in piece:
                if ch.isdigit():
                    digits += ch
                else:
                    break
            if not digits:
                break
            parts.append(int(digits))
        return tuple(parts) if parts else (0,)

    def do_match(self, lower_model_name_or_path: Optional[str] = None) -> bool:
        """Return True when the name looks like a Llama 3.1 instruct model."""
        # bool() so callers always get a real boolean rather than None/"".
        return bool(
            lower_model_name_or_path
            and "llama-3.1" in lower_model_name_or_path
            and "instruct" in lower_model_name_or_path
        )
class DeepseekV2Adapter(NewHFChatModelAdapter):
support_4bit: bool = False
support_8bit: bool = False
@@ -613,6 +634,7 @@ register_model_adapter(StarlingLMAdapter)
# Register the HF chat adapters defined above. Llama31Adapter is registered
# alongside Llama3Adapter; each adapter's do_match decides which one applies,
# so both can coexist in the registry.
# NOTE(review): registration order may influence matching precedence if the
# registry scans adapters in insertion order — confirm against
# register_model_adapter's implementation before reordering.
register_model_adapter(QwenAdapter)
register_model_adapter(QwenMoeAdapter)
register_model_adapter(Llama3Adapter)
register_model_adapter(Llama31Adapter)
register_model_adapter(DeepseekV2Adapter)
register_model_adapter(DeepseekCoderV2Adapter)
register_model_adapter(SailorAdapter)