feat(typescript)/dynamic template (#1287) (#1326)

* feat(typescript)/dynamic template (#1287)

* remove packaged yarn

* prompt templates update wip

* prompt template update

* system prompt template, update types, remove embed promises, cleanup

* support both snake_cased and camelCased prompt context
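  To illustrate, a minimal sketch of what this buys callers — assuming prompt context fields are passed flat on the completion options and that pairs like topK/top_k and nPredict/n_predict are the fields in question:

  ```js
  import { createCompletion, loadModel } from 'gpt4all'

  const model = await loadModel('orca-mini-3b.ggmlv3.q4_0.bin')

  // camelCased prompt context
  const a = await createCompletion(model, [
      { role: 'user', content: 'What is 1 + 1?' },
  ], { temp: 0.7, topK: 40, nPredict: 64 })

  // snake_cased prompt context, accepted as well after this change
  const b = await createCompletion(model, [
      { role: 'user', content: 'What is 1 + 1?' },
  ], { temp: 0.7, top_k: 40, n_predict: 64 })

  console.log(a.choices[0].message, b.choices[0].message)
  ```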

* fix #1277: libbert, libfalcon, and libreplit libs not being moved into the right folder after build

* added support for a modelConfigFile param, allowing the user to specify a local file instead of downloading the remote models.json; added a warning message when a model config fails to load; included prompt context docs by amogus
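  A hedged sketch of the new option; modelConfigFile is the name given above, and the rest of the call mirrors the loadModel usage shown in the diff further down:

  ```js
  import { loadModel } from 'gpt4all'

  // Use a local models.json instead of fetching the remote model list.
  // Per the note above, a warning is printed if the config cannot be loaded.
  const model = await loadModel('orca-mini-3b.ggmlv3.q4_0.bin', {
      verbose: true,
      modelConfigFile: './models.json',
  })
  ```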

* snake_case warning, moved the logic for loading a local models.json into listModels, added a constant for the default remote model list URL, test improvements, simpler hasOwnProperty call

* add DEFAULT_PROMPT_CONTEXT, export new constants
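  A sketch of the exported constants in use; DEFAULT_DIRECTORY and DEFAULT_LIBRARIES_DIRECTORY already appear in the diff below, while treating DEFAULT_PROMPT_CONTEXT as a spreadable base object is an assumption:

  ```js
  import {
      DEFAULT_DIRECTORY,
      DEFAULT_LIBRARIES_DIRECTORY,
      DEFAULT_PROMPT_CONTEXT,
  } from 'gpt4all'

  console.log('models directory:', DEFAULT_DIRECTORY)
  console.log('libraries directory:', DEFAULT_LIBRARIES_DIRECTORY)

  // Start from the library defaults and override only what you need.
  const promptContext = { ...DEFAULT_PROMPT_CONTEXT, temp: 0.2 }
  ```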

* add md5sum testcase and fix constants export

* update types

* throw if attempting to list models without a source
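  A sketch of the expected behavior, assuming listModels accepts a { file, url } style source object (the parameter names and the placeholder URL are assumptions):

  ```js
  import { listModels } from 'gpt4all'

  // From a local file...
  const local = await listModels({ file: './models.json' })

  // ...or from a remote URL (placeholder shown here).
  const remote = await listModels({ url: 'https://example.com/models.json' })

  // Per the change above, asking for the list without any source now throws.
  try {
      await listModels() // no file and no url given
  } catch (err) {
      console.error('listModels needs a source:', err.message)
  }
  ```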

* rebuild docs

* fix download logging an undefined URL, fix a toFixed typo, pass the config file size in for future progress reporting

* added overload with union types

* bump to 2.2.0, remove alpha

* code spelling fixes

---------

Co-authored-by: Andreas Obersteiner <8959303+iimez@users.noreply.github.com>
Author: Jacob Nguyen
Date: 2023-08-14 11:45:45 -05:00
Committed by: GitHub
Parent: 4d855afe97
Commit: 4e55940edf
15 changed files with 5876 additions and 6938 deletions


@@ -1,9 +1,10 @@
 import { LLModel, createCompletion, DEFAULT_DIRECTORY, DEFAULT_LIBRARIES_DIRECTORY, loadModel } from '../src/gpt4all.js'
-const ll = await loadModel(
+const model = await loadModel(
     'orca-mini-3b.ggmlv3.q4_0.bin',
     { verbose: true }
 );
+const ll = model.llm;
 try {
     class Extended extends LLModel {
@@ -26,13 +27,13 @@ console.log("type: " + ll.type());
 console.log("Default directory for models", DEFAULT_DIRECTORY);
 console.log("Default directory for libraries", DEFAULT_LIBRARIES_DIRECTORY);
-const completion1 = await createCompletion(ll, [
+const completion1 = await createCompletion(model, [
     { role : 'system', content: 'You are an advanced mathematician.' },
     { role : 'user', content: 'What is 1 + 1?' },
 ], { verbose: true })
 console.log(completion1.choices[0].message)
-const completion2 = await createCompletion(ll, [
+const completion2 = await createCompletion(model, [
     { role : 'system', content: 'You are an advanced mathematician.' },
     { role : 'user', content: 'What is two plus two?' },
 ], { verbose: true })
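
Put together, the updated spec boils down to roughly the following standalone script (reconstructed from the two hunks above; the surrounding try block and the Extended subclass are elided):

```js
import { createCompletion, DEFAULT_DIRECTORY, DEFAULT_LIBRARIES_DIRECTORY, loadModel } from '../src/gpt4all.js'

// loadModel now resolves to a model wrapper; the raw LLModel is
// still reachable as model.llm for low-level calls such as type().
const model = await loadModel('orca-mini-3b.ggmlv3.q4_0.bin', { verbose: true })
const ll = model.llm

console.log('type: ' + ll.type())
console.log('Default directory for models', DEFAULT_DIRECTORY)
console.log('Default directory for libraries', DEFAULT_LIBRARIES_DIRECTORY)

// Completions are now created against the wrapper, not the raw LLModel.
const completion = await createCompletion(model, [
    { role: 'system', content: 'You are an advanced mathematician.' },
    { role: 'user', content: 'What is 1 + 1?' },
], { verbose: true })
console.log(completion.choices[0].message)
```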