From 0c70b5a5f461a8b81434a0f52f52b903b3c48cf6 Mon Sep 17 00:00:00 2001
From: Jared Van Bortel <jared@nomic.ai>
Date: Wed, 4 Dec 2024 10:56:19 -0500
Subject: [PATCH] llamamodel: add missing softmax to fix temperature (#3202)

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
---
 gpt4all-backend/src/llamamodel.cpp | 3 ++-
 gpt4all-chat/CHANGELOG.md          | 1 +
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/gpt4all-backend/src/llamamodel.cpp b/gpt4all-backend/src/llamamodel.cpp
index af03af81..86c2ea1f 100644
--- a/gpt4all-backend/src/llamamodel.cpp
+++ b/gpt4all-backend/src/llamamodel.cpp
@@ -584,7 +584,8 @@ void LLamaModel::initSampler(const PromptContext &promptCtx)
             llama_sampler_init_top_p(promptCtx.top_p, 1),
             llama_sampler_init_min_p(promptCtx.min_p, 1),
             llama_sampler_init_temp(promptCtx.temp),
-            llama_sampler_init_dist(LLAMA_DEFAULT_SEED)
+            llama_sampler_init_softmax(),
+            llama_sampler_init_dist(LLAMA_DEFAULT_SEED),
         };
         for (auto *smpl : samplers)
             llama_sampler_chain_add(chain, smpl);
diff --git a/gpt4all-chat/CHANGELOG.md b/gpt4all-chat/CHANGELOG.md
index ea57e14b..bcdf94e3 100644
--- a/gpt4all-chat/CHANGELOG.md
+++ b/gpt4all-chat/CHANGELOG.md
@@ -17,6 +17,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
 ### Fixed
 - Fix bug in GUI when localdocs encounters binary data ([#3137](https://github.com/nomic-ai/gpt4all/pull/3137))
 - Fix LocalDocs bugs that prevented some docx files from fully chunking ([#3140](https://github.com/nomic-ai/gpt4all/pull/3140))
+- Fix missing softmax that was causing crashes and effectively infinite temperature since 3.4.0 ([#3202](https://github.com/nomic-ai/gpt4all/pull/3202))

 ## [3.4.2] - 2024-10-16
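
Note for reviewers: the sketch below is a standalone, illustrative C++ example, not GPT4All or llama.cpp code, and every name in it is made up for illustration. It shows why a softmax stage belongs between the temperature sampler and the distribution sampler in the chain above: temperature only rescales raw logits, and a sampler that draws an index in proportion to its input weights only behaves as intended once softmax has normalized those logits into probabilities.

// Illustrative sketch only (all names hypothetical): temperature scaling
// followed by softmax, then a categorical draw from the resulting
// probabilities, mirroring the temp -> softmax -> dist ordering of the
// patched sampler chain.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <random>
#include <vector>

// Apply temperature scaling, then softmax, to raw logits.
std::vector<float> softmax_with_temp(std::vector<float> logits, float temp)
{
    float maxLogit = *std::max_element(logits.begin(), logits.end());
    float sum = 0.0f;
    for (float &l : logits) {
        l = std::exp((l - maxLogit) / temp); // subtract the max for numerical stability
        sum += l;
    }
    for (float &l : logits)
        l /= sum; // normalize: the values now form a probability distribution
    return logits;
}

int main()
{
    std::vector<float> logits {2.0f, 1.0f, 0.5f};
    auto probs = softmax_with_temp(logits, 0.7f);

    // A "dist" sampler draws an index in proportion to these weights. Handed
    // raw logits instead of probabilities, the draw would ignore the shape
    // the temperature was supposed to impose on the distribution.
    std::mt19937 rng {42};
    std::discrete_distribution<int> dist(probs.begin(), probs.end());
    std::printf("sampled token index: %d\n", dist(rng));
}

In the patched chain, llama_sampler_init_softmax() plays the role of the normalization step in softmax_with_temp above, so that llama_sampler_init_dist(LLAMA_DEFAULT_SEED) draws from a proper probability distribution rather than raw, temperature-scaled logits.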