diff --git a/configs/eval/generate.yaml b/configs/eval/generate.yaml
deleted file mode 100644
index b06137d6..00000000
--- a/configs/eval/generate.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-# model/tokenizer
-model_name: # update with llama model name
-tokenizer_name: # update with llama model name
-lora: true
-lora_path: "nomic-ai/gpt4all-lora"
diff --git a/configs/eval/generate_baseline.yaml b/configs/eval/generate_baseline.yaml
index d409d3ab..7c70c814 100644
--- a/configs/eval/generate_baseline.yaml
+++ b/configs/eval/generate_baseline.yaml
@@ -1,5 +1,5 @@
 # model/tokenizer
-model_name: # update with llama model name
-tokenizer_name: # update with llama model name
+model_name: "zpn/llama-7b"
+tokenizer_name: "zpn/llama-7b"
 lora: true
 lora_path: "tloen/alpaca-lora-7b"
\ No newline at end of file
diff --git a/configs/eval/generate_gpt4all_gptj.yaml b/configs/eval/generate_gpt4all_gptj.yaml
index 496ce25f..fc0df450 100644
--- a/configs/eval/generate_gpt4all_gptj.yaml
+++ b/configs/eval/generate_gpt4all_gptj.yaml
@@ -1,4 +1,4 @@
 # model/tokenizer
-model_name: "nomic-ai/gpt4all-warmup-lr-epoch_1"
+model_name: "nomic-ai/gpt4all-warmup-lr-epoch_0"
 tokenizer_name: "EleutherAI/gpt-j-6b"
 lora: false
diff --git a/configs/eval/generate_gpt4all_llama_lora.yaml b/configs/eval/generate_gpt4all_llama_lora.yaml
new file mode 100644
index 00000000..e1b68263
--- /dev/null
+++ b/configs/eval/generate_gpt4all_llama_lora.yaml
@@ -0,0 +1,5 @@
+# model/tokenizer
+model_name: "zpn/llama-7b"
+tokenizer_name: "zpn/llama-7b"
+lora: true
+lora_path: "nomic-ai/gpt4all-lora"
diff --git a/configs/generate/generate.yaml b/configs/generate/generate.yaml
index f81ca3f9..3953d07b 100644
--- a/configs/generate/generate.yaml
+++ b/configs/generate/generate.yaml
@@ -1,6 +1,6 @@
 # model/tokenizer
-model_name: # REPLACE HERE with the base llama model
-tokenizer_name: # REPLACE HERE with the llama tokenizer
+model_name: "zpn/llama-7b"
+tokenizer_name: "zpn/llama-7b"
 lora: true
 lora_path: "nomic-ai/gpt4all-lora"
diff --git a/configs/generate/generate_gptj.yaml b/configs/generate/generate_gptj.yaml
new file mode 100644
index 00000000..6c9cad42
--- /dev/null
+++ b/configs/generate/generate_gptj.yaml
@@ -0,0 +1,15 @@
+# model/tokenizer
+model_name: "nomic-ai/gpt4all-warmup-lr-epoch_1"
+tokenizer_name: "EleutherAI/gpt-j-6b"
+lora: false
+
+
+max_new_tokens: 512
+temperature: 0.001
+prompt: |
+  #this code prints a string reversed
+  my_string = "hello how are you"
+  print(len(my_string))
+
+
+  My code above does not work. Can you help me?
diff --git a/configs/generate/generate_gptj_lora.yaml b/configs/generate/generate_gptj_lora.yaml
new file mode 100644
index 00000000..4444e194
--- /dev/null
+++ b/configs/generate/generate_gptj_lora.yaml
@@ -0,0 +1,15 @@
+# model/tokenizer
+model_name: "EleutherAI/gpt-j-6b"
+tokenizer_name: "EleutherAI/gpt-j-6b"
+lora: true
+lora_path: "nomic-ai/gpt4all-gptj-lora-epoch_0"
+
+max_new_tokens: 512
+temperature: 0
+prompt: |
+  #this code prints a string reversed
+  my_string = "hello how are you"
+  print(len(my_string))
+
+
+  My code above does not work. Can you help me?
\ No newline at end of file