From ed080060538c5ed95c2ce611ac5ae48d04e1e1bf Mon Sep 17 00:00:00 2001
From: Zach Nussbaum
Date: Mon, 27 Mar 2023 17:33:13 +0000
Subject: [PATCH] feat: lora config

---
 configs/train/finetune_lora.yaml | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/configs/train/finetune_lora.yaml b/configs/train/finetune_lora.yaml
index 51e8809e..47b1901e 100644
--- a/configs/train/finetune_lora.yaml
+++ b/configs/train/finetune_lora.yaml
@@ -2,12 +2,12 @@
 model_name: "zpn/llama-7b"
 tokenizer_name: "zpn/llama-7b"
 gradient_checkpointing: false
-save_name: "zpn/vicuna-lora"
+save_name: "nomic-ai/vicuna-lora-multi-turn"
 
 # dataset
 streaming: false
 num_proc: 64
-dataset_path: "data"
+dataset_path: "data_multiturn"
 max_length: 1024
 batch_size: 4
 
@@ -16,10 +16,11 @@ lr: 5.0e-5
 eval_every: 2000
 eval_steps: 100
 save_every: 2000
-output_dir: "ckpts/llama-7b"
+output_dir: "ckpts/llama-7b-lora-multi"
 checkpoint: null
 lora: true
 warmup_steps: 100
+num_epochs: 2
 
 # logging
 wandb: true
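
For context, below is a minimal sketch of how a training script might consume this YAML and honor the "lora: true" flag using Hugging Face PEFT. This is not the repo's actual trainer; the LoRA rank, alpha, dropout, and target modules are illustrative assumptions, since the config above only toggles LoRA on and does not set those hyperparameters.

    import yaml
    from transformers import AutoModelForCausalLM, AutoTokenizer
    from peft import LoraConfig, get_peft_model

    # Load the training config edited by this patch.
    with open("configs/train/finetune_lora.yaml") as f:
        config = yaml.safe_load(f)

    tokenizer = AutoTokenizer.from_pretrained(config["tokenizer_name"])
    model = AutoModelForCausalLM.from_pretrained(config["model_name"])

    if config["lora"]:
        # Wrap the base model so only the low-rank adapter weights train.
        peft_config = LoraConfig(
            r=8,                                  # assumed rank: not set in the yaml
            lora_alpha=32,                        # assumed scaling factor
            lora_dropout=0.05,                    # assumed dropout
            target_modules=["q_proj", "v_proj"],  # assumed attention projections
            bias="none",
            task_type="CAUSAL_LM",
        )
        model = get_peft_model(model, peft_config)
        # Adapter weights are a small fraction of the full model.
        model.print_trainable_parameters()

With such a setup, the new "num_epochs: 2" and the existing "lr" / "warmup_steps" values would feed the optimizer and scheduler, while "save_name" names the pushed adapter checkpoint.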