Revert "typescript bindings maintenance (#2363)"

As discussed on Discord, this PR was not ready to be merged. CI fails on
it.

This reverts commit a602f7fde7.

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
This commit is contained in:
Jared Van Bortel
2024-06-03 17:25:28 -04:00
parent a602f7fde7
commit 55d709862f
30 changed files with 876 additions and 1115 deletions

View File

@@ -1,73 +0,0 @@
const { loadModel } = require("../src/gpt4all.js");
// these tests require an internet connection / a real model
const testModel = "Phi-3-mini-4k-instruct.Q4_0.gguf";
// Integration suite for the native LLModel bindings. Tests execute in
// declaration order and share one `model` instance: loaded on CPU first,
// exercised, then disposed and reloaded on GPU.
// NOTE(review): requires the model file and (for the GPU tests) a working
// GPU device — these are environment-dependent, not pure unit tests.
describe("llmodel", () => {
    let model;

    // Shared inference call used by both the CPU and GPU tests.
    // NOTE(review): assumes the model deterministically answers "paris"
    // at temp 0 — confirm this holds for the pinned test model.
    const inferCapital = () =>
        model.llm.infer("what is the capital of france?", {
            temp: 0,
            promptTemplate: model.config.promptTemplate,
            nPredict: 10,
            onResponseToken: () => true,
        });

    test("load on cpu", async () => {
        model = await loadModel(testModel, { device: "cpu" });
    });

    test("getter working", async () => {
        // Each getter is probed once; exact values (other than the
        // configured thread count) are environment-dependent.
        expect(model.llm.getStateSize()).toBeGreaterThan(0);
        expect(model.llm.getName()).toBe(testModel);
        expect(model.llm.getType()).toBeUndefined();
        expect(Array.isArray(model.llm.getGpuDevices())).toBe(true);
        expect(model.llm.hasGpuDevice()).toBe(false);
        expect(typeof model.llm.getRequiredMemory()).toBe('number');
        expect(model.llm.getThreadCount()).toBe(4);
    });

    test("setting thread count", () => {
        model.llm.setThreadCount(5);
        expect(model.llm.getThreadCount()).toBe(5);
    });

    // 10s timeout: real inference on CPU can be slow.
    test("cpu inference", async () => {
        const res = await inferCapital();
        expect(res.text).toMatch(/paris/i);
    }, 10000);

    test("dispose and load model on gpu", async () => {
        model.dispose();
        model = await loadModel(testModel, { device: "gpu" });
        expect(model.llm.hasGpuDevice()).toBe(true);
    });

    test("gpu inference", async () => {
        const res = await inferCapital();
        expect(res.text).toMatch(/paris/i);
    }, 10000);

    // Release native resources regardless of individual test outcomes.
    afterAll(() => {
        model.dispose();
    });
});

View File

@@ -2,6 +2,7 @@ const path = require("node:path");
const os = require("node:os");
const fsp = require("node:fs/promises");
const { existsSync } = require('node:fs');
const { LLModel } = require("node-gyp-build")(path.resolve(__dirname, ".."));
const {
listModels,
downloadModel,
@@ -12,8 +13,11 @@ const {
DEFAULT_LIBRARIES_DIRECTORY,
DEFAULT_MODEL_LIST_URL,
} = require("../src/config.js");
// these tests do not require an internet connection or an actual model
const {
loadModel,
createPrompt,
createCompletion,
} = require("../src/gpt4all.js");
describe("config", () => {
test("default paths constants are available and correct", () => {