llamamodel: add missing softmax to fix temperature (#3202)
Signed-off-by: Jared Van Bortel <jared@nomic.ai>
Parent: ffd29eae08
Commit: 0c70b5a5f4
```diff
@@ -584,7 +584,8 @@ void LLamaModel::initSampler(const PromptContext &promptCtx)
             llama_sampler_init_top_p(promptCtx.top_p, 1),
             llama_sampler_init_min_p(promptCtx.min_p, 1),
             llama_sampler_init_temp(promptCtx.temp),
-            llama_sampler_init_dist(LLAMA_DEFAULT_SEED)
+            llama_sampler_init_softmax(),
+            llama_sampler_init_dist(LLAMA_DEFAULT_SEED),
         };
         for (auto *smpl : samplers)
             llama_sampler_chain_add(chain, smpl);
```
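For context, here is a minimal standalone sketch of the sampling pipeline this chain builds; it is illustrative only, not GPT4All's or llama.cpp's actual implementation, and every name in it is hypothetical. The key point: temperature scaling only divides raw logits by `temp` (softmax(z_i) = exp(z_i/T) / Σ_j exp(z_j/T)), so a softmax step must normalize them into probabilities before the final distribution sampler draws a token. Presumably the vendored llama.cpp's dist sampler did not normalize on its own, so without the explicit softmax the temperature had no effect, matching the "effectively infinite temperature" symptom in the changelog.

```cpp
// Minimal sketch (assumed pipeline, not llama.cpp API) of why a softmax
// must sit between temperature scaling and distribution sampling.
#include <algorithm>
#include <cmath>
#include <random>
#include <vector>

// Scale logits by 1/temperature: temp < 1 sharpens, temp > 1 flattens.
void apply_temperature(std::vector<float> &logits, float temp) {
    for (float &l : logits)
        l /= temp;
}

// Convert logits to a normalized probability distribution.
// This is the step the fix adds: without it, the sampler below would
// receive raw logits instead of probabilities.
std::vector<float> softmax(const std::vector<float> &logits) {
    float max_l = *std::max_element(logits.begin(), logits.end());
    std::vector<float> probs(logits.size());
    float sum = 0.0f;
    for (size_t i = 0; i < logits.size(); ++i) {
        probs[i] = std::exp(logits[i] - max_l); // subtract max for stability
        sum += probs[i];
    }
    for (float &p : probs)
        p /= sum;
    return probs;
}

// Draw a token index from the (already normalized) probabilities.
int sample_dist(const std::vector<float> &probs, std::mt19937 &rng) {
    std::discrete_distribution<int> dist(probs.begin(), probs.end());
    return dist(rng);
}

int main() {
    std::vector<float> logits = {2.0f, 1.0f, 0.1f};
    apply_temperature(logits, 0.7f);
    std::mt19937 rng{42};
    int tok = sample_dist(softmax(logits), rng); // softmax is the fixed step
    return tok;
}
```

Note that in this sketch, skipping the `softmax` call would hand raw, possibly negative logits to `std::discrete_distribution`, whose behavior is undefined for negative weights; a plausible analogue of the crashes the fix also addresses.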
```diff
@@ -17,6 +17,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
 ### Fixed
 - Fix bug in GUI when localdocs encounters binary data ([#3137](https://github.com/nomic-ai/gpt4all/pull/3137))
 - Fix LocalDocs bugs that prevented some docx files from fully chunking ([#3140](https://github.com/nomic-ai/gpt4all/pull/3140))
+- Fix missing softmax that was causing crashes and effectively infinite temperature since 3.4.0 ([#3202](https://github.com/nomic-ai/gpt4all/pull/3202))
 
 ## [3.4.2] - 2024-10-16
 
```