Revert "typescript bindings maintenance (#2363)"
As discussed on Discord, this PR was not ready to be merged. CI fails on it.

This reverts commit a602f7fde7.
Signed-off-by: Jared Van Bortel <jared@nomic.ai>
```diff
@@ -1,73 +0,0 @@
-const { loadModel } = require("../src/gpt4all.js");
-
-// these tests require an internet connection / a real model
-const testModel = "Phi-3-mini-4k-instruct.Q4_0.gguf";
-
-describe("llmodel", () => {
-    let model;
-
-    test("load on cpu", async () => {
-        model = await loadModel(testModel, {
-            device: "cpu",
-        });
-    });
-
-    test("getter working", async () => {
-        const stateSize = model.llm.getStateSize();
-        expect(stateSize).toBeGreaterThan(0);
-        const name = model.llm.getName();
-        expect(name).toBe(testModel);
-        const type = model.llm.getType();
-        expect(type).toBeUndefined();
-        const devices = model.llm.getGpuDevices();
-        expect(Array.isArray(devices)).toBe(true);
-        const gpuEnabled = model.llm.hasGpuDevice();
-        expect(gpuEnabled).toBe(false);
-        const requiredMem = model.llm.getRequiredMemory();
-        expect(typeof requiredMem).toBe('number');
-        const threadCount = model.llm.getThreadCount();
-        expect(threadCount).toBe(4);
-    });
-
-    test("setting thread count", () => {
-        model.llm.setThreadCount(5);
-        expect(model.llm.getThreadCount()).toBe(5);
-    });
-
-    test("cpu inference", async () => {
-        const res = await model.llm.infer("what is the capital of france?", {
-            temp: 0,
-            promptTemplate: model.config.promptTemplate,
-            nPredict: 10,
-            onResponseToken: () => {
-                return true;
-            },
-        });
-        expect(res.text).toMatch(/paris/i);
-    }, 10000);
-
-    test("dispose and load model on gpu", async () => {
-        model.dispose();
-        model = await loadModel(testModel, {
-            device: "gpu",
-        });
-        const gpuEnabled = model.llm.hasGpuDevice();
-        expect(gpuEnabled).toBe(true);
-    });
-
-    test("gpu inference", async () => {
-        const res = await model.llm.infer("what is the capital of france?", {
-            temp: 0,
-            promptTemplate: model.config.promptTemplate,
-            nPredict: 10,
-            onResponseToken: () => {
-                return true;
-            },
-        });
-        expect(res.text).toMatch(/paris/i);
-    }, 10000);
-
-    afterAll(() => {
-        model.dispose();
-    });
-});
```
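For context, the following is a condensed sketch of the binding surface the deleted spec exercised. Every call, option, and the model file name are taken from the hunk above; this is a reading aid for the revert, not documented API:

```js
// Reading aid only: reconstructed from the deleted spec above, not from docs.
const { loadModel } = require("../src/gpt4all.js");

async function demo() {
    // Requires an internet connection / a real model, as the spec notes.
    const model = await loadModel("Phi-3-mini-4k-instruct.Q4_0.gguf", {
        device: "cpu", // the spec later reloads the same model with device: "gpu"
    });
    const res = await model.llm.infer("what is the capital of france?", {
        temp: 0,
        promptTemplate: model.config.promptTemplate,
        nPredict: 10,
        onResponseToken: () => true, // the spec's callback always returns true
    });
    console.log(res.text); // the spec expects this to match /paris/i
    model.dispose();
}

demo();
```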
```
@@ -2,6 +2,7 @@ const path = require("node:path");
const os = require("node:os");
const fsp = require("node:fs/promises");
const { existsSync } = require('node:fs');
const { LLModel } = require("node-gyp-build")(path.resolve(__dirname, ".."));
const {
    listModels,
    downloadModel,
@@ -12,8 +13,11 @@ const {
    DEFAULT_LIBRARIES_DIRECTORY,
    DEFAULT_MODEL_LIST_URL,
} = require("../src/config.js");

// these tests do not require an internet connection or an actual model
const {
    loadModel,
    createPrompt,
    createCompletion,
} = require("../src/gpt4all.js");

describe("config", () => {
    test("default paths constants are available and correct", () => {
```
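One line in the hunk above is worth a gloss: `require("node-gyp-build")(...)` is that package's standard loader pattern for native addons. A minimal sketch of how it behaves, assuming a compiled addon exists under the package root:

```js
const path = require("node:path");

// node-gyp-build exports a function: given a package root, it resolves and
// requires a matching prebuilt binary or a local build, so consumers usually
// do not need to compile the addon from source themselves.
const bindings = require("node-gyp-build")(path.resolve(__dirname, ".."));

// The native addon's exports are then ordinary JS values, e.g. the LLModel
// class destructured in the test file above.
const { LLModel } = bindings;
```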