[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
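
Each hunk below removes and re-adds a file's final line, the usual signature of whitespace / end-of-file normalization by pre-commit hooks. As a point of reference, a minimal pre-commit configuration that produces this kind of fix looks like the sketch below; the pinned rev and the exact hook selection are illustrative assumptions, not necessarily what this repository uses.

# .pre-commit-config.yaml (illustrative sketch; rev and hook selection are assumptions)
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.5.0
    hooks:
      - id: trailing-whitespace   # strips trailing spaces at line ends
      - id: end-of-file-fixer     # ensures each file ends with exactly one newline
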
@@ -392,4 +392,4 @@ def tokenize_kto(
         "label": data_point["label"],
         "input_id_decode": decoded_full_prompt,
         "completion_decode": decoded_completion,
-    }
+    }

@@ -356,4 +356,4 @@ class DPOTrainer(SLTrainer):
             os.makedirs(self.save_dir, exist_ok=True)
             with open(os.path.join(self.save_dir, f"eval_result_epoch{epoch}.txt"), "w") as f:
                 f.write(msg)
-        step_bar.close()
+        step_bar.close()

@@ -346,4 +346,4 @@ class KTOTrainer(SLTrainer):
             os.makedirs(self.save_dir, exist_ok=True)
             with open(os.path.join(self.save_dir, f"eval_result_epoch{epoch}.txt"), "w") as f:
                 f.write(msg)
-        step_bar.close()
+        step_bar.close()

@@ -323,4 +323,4 @@ class ORPOTrainer(SLTrainer):
             os.makedirs(self.save_dir, exist_ok=True)
             with open(os.path.join(self.save_dir, f"eval_result_epoch{epoch}.txt"), "w") as f:
                 f.write(msg)
-        step_bar.close()
+        step_bar.close()

@@ -903,4 +903,4 @@ For details, see [`inference/`](https://github.com/hpcaitech/ColossalAI/tree/mai
 ## Attention
 
 
-The examples are demos for the whole training process. You need to change the hyper-parameters to reach great performance.
+The examples are demos for the whole training process. You need to change the hyper-parameters to reach great performance.

@@ -375,4 +375,4 @@ if __name__ == "__main__":
     os.makedirs(os.path.dirname(args.config_file), exist_ok=True)
     with open(args.config_file, "w") as f:
         json.dump(args.__dict__, f, indent=4)
-    train(args)
+    train(args)

@@ -340,4 +340,4 @@ if __name__ == "__main__":
     os.makedirs(os.path.dirname(args.config_file), exist_ok=True)
     with open(args.config_file, "w") as f:
         json.dump(args.__dict__, f, indent=4)
-    train(args)
+    train(args)

@@ -20,4 +20,4 @@ datasets
 ninja==1.11.1
 sentencepiece==0.1.99
 flash-attn
-tiktoken
+tiktoken

@@ -640,4 +640,4 @@ for lora_rank in ${LORA_RANK[@]}; do
       fi
     done
   done
-done
+done