From 44dfda990b5d7e7078c19e8e0b6e4295477a996f Mon Sep 17 00:00:00 2001
From: Shoufa Chen
Date: Fri, 7 Apr 2023 05:43:46 +0800
Subject: [PATCH] fix https://github.com/hwchase17/langchain/issues/2392 (#2393)

As noted in
https://github.com/ggerganov/llama.cpp/blob/master/migrate-ggml-2023-03-30-pr613.py,
the `llama.cpp` authors made a breaking change to the file format on
2023-03-30 in https://github.com/ggerganov/llama.cpp/pull/613.
Therefore, we also need to run `migrate-ggml-2023-03-30-pr613.py` to convert
the llama model.
---
 tests/integration_tests/embeddings/test_llamacpp.py | 9 +++++++--
 tests/integration_tests/llms/test_llamacpp.py       | 7 ++++++-
 2 files changed, 13 insertions(+), 3 deletions(-)

diff --git a/tests/integration_tests/embeddings/test_llamacpp.py b/tests/integration_tests/embeddings/test_llamacpp.py
index 36aed8e9f8e..48da7e12e04 100644
--- a/tests/integration_tests/embeddings/test_llamacpp.py
+++ b/tests/integration_tests/embeddings/test_llamacpp.py
@@ -14,17 +14,22 @@ def get_model() -> str:
     model_url = "https://huggingface.co/Sosaka/Alpaca-native-4bit-ggml/resolve/main/ggml-alpaca-7b-q4.bin"
     tokenizer_url = "https://huggingface.co/decapoda-research/llama-7b-hf/resolve/main/tokenizer.model"
     conversion_script = "https://github.com/ggerganov/llama.cpp/raw/master/convert-unversioned-ggml-to-ggml.py"
+    migrate_script = "https://github.com/ggerganov/llama.cpp/raw/master/migrate-ggml-2023-03-30-pr613.py"
     local_filename = model_url.split("/")[-1]
+    local_filename_ggjt = local_filename.split('.')[0] + '-ggjt.' + local_filename.split('.')[1]
 
     if not os.path.exists("convert-unversioned-ggml-to-ggml.py"):
         urlretrieve(conversion_script, "convert-unversioned-ggml-to-ggml.py")
+    if not os.path.exists("migrate-ggml-2023-03-30-pr613.py"):
+        urlretrieve(migrate_script, "migrate-ggml-2023-03-30-pr613.py")
     if not os.path.exists("tokenizer.model"):
         urlretrieve(tokenizer_url, "tokenizer.model")
     if not os.path.exists(local_filename):
         urlretrieve(model_url, local_filename)
-    os.system("python convert-unversioned-ggml-to-ggml.py . tokenizer.model")
+    os.system(f"python convert-unversioned-ggml-to-ggml.py . tokenizer.model")
+    os.system(f"python migrate-ggml-2023-03-30-pr613.py {local_filename} {local_filename_ggjt}")
 
-    return local_filename
+    return local_filename_ggjt
 
 
 def test_llamacpp_embedding_documents() -> None:
diff --git a/tests/integration_tests/llms/test_llamacpp.py b/tests/integration_tests/llms/test_llamacpp.py
index 11758aa63cf..9e708eed050 100644
--- a/tests/integration_tests/llms/test_llamacpp.py
+++ b/tests/integration_tests/llms/test_llamacpp.py
@@ -13,17 +13,22 @@ def get_model() -> str:
     model_url = "https://huggingface.co/Sosaka/Alpaca-native-4bit-ggml/resolve/main/ggml-alpaca-7b-q4.bin"
     tokenizer_url = "https://huggingface.co/decapoda-research/llama-7b-hf/resolve/main/tokenizer.model"
     conversion_script = "https://github.com/ggerganov/llama.cpp/raw/master/convert-unversioned-ggml-to-ggml.py"
+    migrate_script = "https://github.com/ggerganov/llama.cpp/raw/master/migrate-ggml-2023-03-30-pr613.py"
     local_filename = model_url.split("/")[-1]
+    local_filename_ggjt = local_filename.split('.')[0] + '-ggjt.' + local_filename.split('.')[1]
 
     if not os.path.exists("convert-unversioned-ggml-to-ggml.py"):
         urlretrieve(conversion_script, "convert-unversioned-ggml-to-ggml.py")
+    if not os.path.exists("migrate-ggml-2023-03-30-pr613.py"):
+        urlretrieve(migrate_script, "migrate-ggml-2023-03-30-pr613.py")
     if not os.path.exists("tokenizer.model"):
         urlretrieve(tokenizer_url, "tokenizer.model")
     if not os.path.exists(local_filename):
         urlretrieve(model_url, local_filename)
     os.system(f"python convert-unversioned-ggml-to-ggml.py . tokenizer.model")
+    os.system(f"python migrate-ggml-2023-03-30-pr613.py {local_filename} {local_filename_ggjt}")
 
-    return local_filename
+    return local_filename_ggjt
 
 
 def test_llamacpp_inference() -> None:
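
Note: the essential new step this patch adds to both test helpers is deriving a
ggjt output name and running the migration script on the downloaded model. A
minimal standalone sketch of that step follows; variable names and file names
mirror the diff above, and it assumes migrate-ggml-2023-03-30-pr613.py has
already been downloaded into the working directory as the patched helpers do.

    import os

    # Name of the downloaded ggml model, as in the diff above.
    local_filename = "ggml-alpaca-7b-q4.bin"

    # Derive the ggjt output name:
    # "ggml-alpaca-7b-q4.bin" -> "ggml-alpaca-7b-q4-ggjt.bin".
    local_filename_ggjt = (
        local_filename.split(".")[0] + "-ggjt." + local_filename.split(".")[1]
    )

    # Migrate the old ggml file to the new ggjt format introduced by
    # llama.cpp PR 613; the patched tests then load the migrated file.
    os.system(
        f"python migrate-ggml-2023-03-30-pr613.py {local_filename} {local_filename_ggjt}"
    )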