mirror of https://github.com/hpcaitech/ColossalAI.git (synced 2025-09-15 06:00:07 +00:00)
replace the customized dataloader setup with the built-in one
@@ -6,8 +6,8 @@ TEST_DATA_DIR=$BASE_DIR/tests/test_data
 DATA_SAVE_PATH=$BASE_TEMP_DIR/tests
 CONFIG_DIR=$BASE_DIR/config
 
-# MODELS=("colossal-llama2" "llama2" "mistral" "chatGLM2" "chatGLM3" "deepseek" "Yi" "baichuan")
-MODELS=("colossal-llama2" "llama2" "chatGLM2" "chatGLM3" "deepseek" "Yi")
+MODELS=("colossal-llama2" "llama2" "mistral" "chatGLM2" "chatGLM3" "deepseek" "Yi" "baichuan") # for local test
+# MODELS=("colossal-llama2" "llama2" "chatGLM2" "chatGLM3" "deepseek" "Yi")
 
 get_pretrain() {
     local model=$1
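The MODELS array above presumably feeds a loop that resolves each model's pretrained checkpoint through get_pretrain. A minimal sketch of that pattern, assuming get_pretrain prints the checkpoint path; the loop body is illustrative, not the actual test script:

    # Hypothetical driver loop: the names (MODELS, get_pretrain) come from the
    # diff above; the body is an illustration, not the real script.
    for model in "${MODELS[@]}"; do
        pretrain=$(get_pretrain "$model")
        echo "[Test]: templating for $model using pretrain $pretrain"
    done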
@@ -30,8 +30,7 @@ MODEL_SAVE_PATH=$TEMP_DIR/rlhf_models
 MODELS_DIR=$TEMP_DIR/models_config
 # Skip those tests due to CI tests timeout
 MODELS=('llama')
-# ADVANCED_PLUGINS=('pp' 'tp_zero2' 'tp_pp' 'sp_split_gather' 'sp_ring' 'sp_all_to_all' '3d' 'gemini' 'gemini_auto' 'zero2' 'zero2_cpu')
-ADVANCED_PLUGINS=('tp_zero2' '3d' 'gemini' 'gemini_auto' 'zero2' 'zero2_cpu')
+ADVANCED_PLUGINS=('sp_split_gather' 'sp_ring' 'sp_all_to_all' 'tp_zero2' '3d' 'gemini' 'gemini_auto' 'zero2' 'zero2_cpu') # pp is still buggy
 PLUGINS=('3d' 'gemini' 'gemini_auto' 'zero2' 'zero2_cpu')
 LORA_RANK=('0') # skip to reduce CI execution time, can pass all locally
 
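These arrays define the test matrix: each combination of model, plugin, and LoRA rank becomes one test case (the later hunk's "for lora_rank in ${LORA_RANK[@]}; do" confirms at least the outer loop). A minimal sketch of how such a matrix typically expands; the nesting order and the echo are assumptions, not the real script:

    # Hypothetical expansion of the test matrix defined by the arrays above.
    for lora_rank in "${LORA_RANK[@]}"; do
        for model in "${MODELS[@]}"; do
            for plugin in "${ADVANCED_PLUGINS[@]}"; do
                echo "would run: model=$model plugin=$plugin lora_rank=$lora_rank"
            done
        done
    done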
@@ -281,7 +280,7 @@ echo "[Test]: testing ppo ..."
 
 
 SKIPPED_TESTS=(
-    # llama-3d # 3d plugin doesn't support lora
+    llama-3d # 3d plugin doesn't support lora
     llama-gemini # gemini doesn't support lora
 )
 
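SKIPPED_TESTS is a denylist of model-plugin combinations; uncommenting llama-3d re-enables the skip because the 3d plugin doesn't support LoRA. A minimal sketch of how such a list is usually consulted inside the test loop; the exact key format in the real script may differ:

    # Hypothetical skip check: bail out of the current iteration when the
    # model-plugin pair is listed in SKIPPED_TESTS.
    if [[ " ${SKIPPED_TESTS[*]} " =~ " ${model}-${plugin} " ]]; then
        echo "[Test]: skipped $model-$plugin"
        continue
    fi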
@@ -359,7 +358,7 @@ for lora_rank in ${LORA_RANK[@]}; do
         $grad_ckpt \
         --max_len 400 \
         --max_seq_len 10 \
-        --use_flash_attn
+        # --use_flash_attn
     passed=$?
     if [ $passed -eq 0 ]; then
         rm -rf $MODEL_SAVE_PATH/*
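Commenting out --use_flash_attn still parses because the trailing backslash on the previous line joins the "#" line onto the command, and bash then treats everything from "#" onward as an end-of-line comment. A self-contained illustration of that behavior, with printf standing in for the training command:

    # Prints only the two remaining options; the commented flag is dropped
    # because "\" joins the lines and "#" starts a comment on the joined line.
    printf '%s\n' \
        --max_len 400 \
        --max_seq_len 10 \
        # --use_flash_attn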