diff --git a/gpt4all-chat/CMakeLists.txt b/gpt4all-chat/CMakeLists.txt index 0f9d0ab0..076e3c0b 100644 --- a/gpt4all-chat/CMakeLists.txt +++ b/gpt4all-chat/CMakeLists.txt @@ -18,7 +18,7 @@ endif() set(APP_VERSION_MAJOR 2) set(APP_VERSION_MINOR 7) -set(APP_VERSION_PATCH 1) +set(APP_VERSION_PATCH 2) set(APP_VERSION "${APP_VERSION_MAJOR}.${APP_VERSION_MINOR}.${APP_VERSION_PATCH}") # Include the binary directory for the generated header file diff --git a/gpt4all-chat/metadata/release.json b/gpt4all-chat/metadata/release.json index bd5b9b68..1ca17c3a 100644 --- a/gpt4all-chat/metadata/release.json +++ b/gpt4all-chat/metadata/release.json @@ -683,6 +683,28 @@ * Jared Van Bortel (Nomic AI) * Adam Treat (Nomic AI) * Community (beta testers, bug reporters, bindings authors) +" + }, + { + "version": "2.7.1", + "notes": +" +* Update to latest llama.cpp with support for Google Gemma +* Gemma, Phi and Phi-2, Qwen2, and StableLM are now all GPU accelerated +* Large revamp of the model loading to support explicit unload/reload +* Bugfixes for ChatML and improved version of Mistral OpenOrca +* We no longer load a model by default on application start +* We no longer load a model by default on chat context switch +* Fixes for visual artifacts in update reminder dialog +* Blacklist Intel GPUs for now as we don't support them yet +* Fixes for binary save/restore of chat +* Save and restore of window geometry across application starts +", + "contributors": +" +* Jared Van Bortel (Nomic AI) +* Adam Treat (Nomic AI) +* Community (beta testers, bug reporters, bindings authors) " } ]