diff --git a/pilot/configs/model_config.py b/pilot/configs/model_config.py index 5ab2e4fbe..9699061b7 100644 --- a/pilot/configs/model_config.py +++ b/pilot/configs/model_config.py @@ -21,7 +21,9 @@ LLM_MODEL_CONFIG = { "flan-t5-base": os.path.join(MODEL_PATH, "flan-t5-base"), "vicuna-13b": os.path.join(MODEL_PATH, "vicuna-13b"), "text2vec": os.path.join(MODEL_PATH, "text2vec-large-chinese"), - "sentence-transforms": os.path.join(MODEL_PATH, "all-MiniLM-L6-v2") + "sentence-transforms": os.path.join(MODEL_PATH, "all-MiniLM-L6-v2"), + "codegen2-7b": os.path.join(MODEL_PATH, "codegen2-7b"), + "codet5p-2b": os.path.join(MODEL_PATH, "codet5p-2b"), } # Load model config diff --git a/pilot/model/adapter.py b/pilot/model/adapter.py index 9afd2c01f..83d8a3717 100644 --- a/pilot/model/adapter.py +++ b/pilot/model/adapter.py @@ -68,6 +68,19 @@ class ChatGLMAdapater(BaseLLMAdaper): model_path, trust_remote_code=True, **from_pretrained_kwargs ).half().cuda() return model, tokenizer + +class ZiYaLLaMaAdapter(BaseLLMAdaper): + # TODO + pass + +class CodeGenAdapter(BaseLLMAdaper): + pass + +class StarCoderAdapter(BaseLLMAdaper): + pass + +class T5CodeAdapter(BaseLLMAdaper): + pass class KoalaLLMAdapter(BaseLLMAdaper): """Koala LLM Adapter which Based LLaMA """